From 7cd270771315d27f3effec1daeda1992bf457f30 Mon Sep 17 00:00:00 2001
From: Chris Church
Date: Thu, 14 Nov 2013 22:55:03 -0500
Subject: [PATCH] Updated all vendored third-party packages.

---
 awx/lib/site-packages/README | 45 +-
 awx/lib/site-packages/amqp/__init__.py | 3 +-
 awx/lib/site-packages/amqp/channel.py | 53 +-
 awx/lib/site-packages/amqp/connection.py | 8 +-
 awx/lib/site-packages/amqp/exceptions.py | 4 +-
 awx/lib/site-packages/amqp/method_framing.py | 6 +-
 awx/lib/site-packages/amqp/protocol.py | 13 +
 awx/lib/site-packages/amqp/transport.py | 106 +-
 awx/lib/site-packages/amqp/utils.py | 38 +
 awx/lib/site-packages/billiard/__init__.py | 18 +-
 awx/lib/site-packages/billiard/_connection.py | 71 +-
 .../site-packages/billiard/_connection3.py | 955 +++
 awx/lib/site-packages/billiard/_ext.py | 9 +-
 awx/lib/site-packages/billiard/_reduction.py | 244 +
 awx/lib/site-packages/billiard/_reduction3.py | 249 +
 awx/lib/site-packages/billiard/_win.py | 2 +-
 awx/lib/site-packages/billiard/common.py | 42 +-
 awx/lib/site-packages/billiard/compat.py | 68 +-
 awx/lib/site-packages/billiard/connection.py | 22 +-
 .../site-packages/billiard/dummy/__init__.py | 12 +-
 .../billiard/dummy/connection.py | 2 +-
 awx/lib/site-packages/billiard/einfo.py | 2 +-
 awx/lib/site-packages/billiard/five.py | 189 +
 awx/lib/site-packages/billiard/forking.py | 149 +-
 awx/lib/site-packages/billiard/heap.py | 9 +-
 awx/lib/site-packages/billiard/managers.py | 49 +-
 awx/lib/site-packages/billiard/pool.py | 840 +-
 awx/lib/site-packages/billiard/process.py | 34 +-
 awx/lib/site-packages/billiard/queues.py | 50 +-
 awx/lib/site-packages/billiard/reduction.py | 196 +-
 .../site-packages/billiard/sharedctypes.py | 12 +-
 awx/lib/site-packages/billiard/synchronize.py | 17 +-
 .../site-packages/billiard/tests/__init__.py | 9 +-
 .../billiard/tests/test_common.py | 1 -
 awx/lib/site-packages/billiard/tests/utils.py | 9 +-
 awx/lib/site-packages/billiard/util.py | 225 +-
 awx/lib/site-packages/boto/__init__.py | 25 +-
 awx/lib/site-packages/boto/auth.py | 16 +-
 .../boto/cloudsearch/document.py | 7 +-
 .../site-packages/boto/cloudsearch/search.py | 31 +-
 .../site-packages/boto/cloudtrail/__init__.py | 48 +
 .../boto/cloudtrail/exceptions.py | 86 +
 .../site-packages/boto/cloudtrail/layer1.py | 309 +
 awx/lib/site-packages/boto/connection.py | 112 +-
 awx/lib/site-packages/boto/dynamodb/item.py | 2 +-
 awx/lib/site-packages/boto/dynamodb/types.py | 4 +
 .../site-packages/boto/dynamodb2/layer1.py | 16 +-
 awx/lib/site-packages/boto/dynamodb2/table.py | 41 +-
 .../boto/ec2/autoscale/__init__.py | 50 +-
 .../site-packages/boto/ec2/autoscale/group.py | 16 +-
 .../boto/ec2/autoscale/launchconfig.py | 13 +-
 .../site-packages/boto/ec2/autoscale/tag.py | 6 +-
 .../boto/ec2/cloudwatch/alarm.py | 15 +-
 awx/lib/site-packages/boto/ec2/connection.py | 37 +-
 .../site-packages/boto/ec2/elb/__init__.py | 28 +-
 .../boto/ec2/elb/loadbalancer.py | 2 +-
 awx/lib/site-packages/boto/ec2/instance.py | 8 -
 .../boto/ec2/reservedinstance.py | 11 +-
 .../site-packages/boto/ec2/securitygroup.py | 9 +-
 .../boto/elastictranscoder/layer1.py | 4 +-
 awx/lib/site-packages/boto/emr/__init__.py | 14 +-
 awx/lib/site-packages/boto/emr/connection.py | 164 +-
 awx/lib/site-packages/boto/emr/emrobject.py | 296 +
 .../site-packages/boto/glacier/__init__.py | 3 +
 awx/lib/site-packages/boto/gs/bucket.py | 24 +-
 .../boto/gs/resumable_upload_handler.py | 2 +-
 awx/lib/site-packages/boto/handler.py | 2 +
 awx/lib/site-packages/boto/iam/connection.py | 6 +-
 awx/lib/site-packages/boto/manage/cmdshell.py | 6 +-
 awx/lib/site-packages/boto/mws/connection.py | 61 +-
 awx/lib/site-packages/boto/mws/response.py | 142 +-
 awx/lib/site-packages/boto/opsworks/layer1.py | 243 +-
 .../site-packages/boto/redshift/__init__.py | 6 +
 .../site-packages/boto/redshift/exceptions.py | 269 +
 awx/lib/site-packages/boto/redshift/layer1.py | 941 ++-
 .../site-packages/boto/route53/connection.py | 54 +-
 awx/lib/site-packages/boto/s3/bucket.py | 44 +-
 awx/lib/site-packages/boto/sns/connection.py | 3 +-
 awx/lib/site-packages/boto/sqs/connection.py | 9 +-
 awx/lib/site-packages/boto/sqs/message.py | 5 +-
 awx/lib/site-packages/boto/swf/layer2.py | 18 +-
 awx/lib/site-packages/boto/vpc/__init__.py | 462 +-
 .../site-packages/boto/vpc/customergateway.py | 8 +-
 awx/lib/site-packages/boto/vpc/networkacl.py | 164 +
 awx/lib/site-packages/celery/__compat__.py | 208 -
 awx/lib/site-packages/celery/__init__.py | 126 +-
 awx/lib/site-packages/celery/__main__.py | 32 +-
 awx/lib/site-packages/celery/_state.py | 21 +-
 awx/lib/site-packages/celery/app/__init__.py | 35 +-
 awx/lib/site-packages/celery/app/abstract.py | 63 -
 awx/lib/site-packages/celery/app/amqp.py | 141 +-
 .../site-packages/celery/app/annotations.py | 15 +-
 awx/lib/site-packages/celery/app/base.py | 285 +-
 awx/lib/site-packages/celery/app/builtins.py | 200 +-
 awx/lib/site-packages/celery/app/control.py | 54 +-
 awx/lib/site-packages/celery/app/defaults.py | 78 +-
 awx/lib/site-packages/celery/app/log.py | 57 +-
 awx/lib/site-packages/celery/app/registry.py | 17 +-
 awx/lib/site-packages/celery/app/routes.py | 27 +-
 awx/lib/site-packages/celery/app/task.py | 308 +-
 awx/lib/site-packages/celery/app/trace.py | 399 +
 awx/lib/site-packages/celery/app/utils.py | 115 +-
 awx/lib/site-packages/celery/apps/beat.py | 80 +-
 awx/lib/site-packages/celery/apps/worker.py | 333 +-
 .../site-packages/celery/backends/__init__.py | 19 +-
 awx/lib/site-packages/celery/backends/amqp.py | 208 +-
 awx/lib/site-packages/celery/backends/base.py | 331 +-
 .../site-packages/celery/backends/cache.py | 19 +-
 .../celery/backends/cassandra.py | 24 +-
 .../celery/backends/couchbase.py | 116 +
 .../celery/backends/database/__init__.py | 24 +-
 .../celery/backends/database/a805d4bd.py | 71 -
 .../celery/backends/database/dfd042c7.py | 50 -
 .../celery/backends/database/models.py | 11 +-
 .../celery/backends/database/session.py | 25 +-
 .../site-packages/celery/backends/mongodb.py | 101 +-
 .../site-packages/celery/backends/redis.py | 14 +-
 awx/lib/site-packages/celery/backends/rpc.py | 64 +
 awx/lib/site-packages/celery/beat.py | 111 +-
 awx/lib/site-packages/celery/bin/__init__.py | 5 +
 .../celery/bin/{camqadm.py => amqp.py} | 117 +-
 awx/lib/site-packages/celery/bin/base.py | 323 +-
 .../celery/bin/{celerybeat.py => beat.py} | 36 +-
 awx/lib/site-packages/celery/bin/celery.py | 586 +-
 awx/lib/site-packages/celery/bin/celeryctl.py | 16 -
 .../celery/bin/celeryd_detach.py | 48 +-
 .../celery/bin/{celeryev.py => events.py} | 48 +-
 awx/lib/site-packages/celery/bin/graph.py | 191 +
 .../celery/bin/{celeryd_multi.py => multi.py} | 284 +-
 .../celery/bin/{celeryd.py => worker.py} | 110 +-
 awx/lib/site-packages/celery/bootsteps.py | 420 +
 awx/lib/site-packages/celery/canvas.py | 260 +-
 .../celery/concurrency/__init__.py | 5 +-
 .../celery/concurrency/asynpool.py | 1188 +++
 .../site-packages/celery/concurrency/base.py | 68 +-
 .../celery/concurrency/eventlet.py | 24 +-
 .../celery/concurrency/gevent.py | 35 +-
 .../celery/concurrency/prefork.py | 180 +
 .../celery/concurrency/processes/__init__.py | 148 -
 .../site-packages/celery/concurrency/solo.py | 2 +
 .../celery/concurrency/threads.py | 4 +-
 .../site-packages/celery/contrib/abortable.py | 18 +-
 .../site-packages/celery/contrib/batches.py | 21 +-
 .../site-packages/celery/contrib/bundles.py | 65 -
 .../site-packages/celery/contrib/methods.py | 14 +-
 .../site-packages/celery/contrib/migrate.py | 64 +-
 awx/lib/site-packages/celery/contrib/rdb.py | 51 +-
 .../site-packages/celery/datastructures.py | 324 +-
 .../site-packages/celery/events/__init__.py | 221 +-
 .../site-packages/celery/events/cursesmon.py | 297 +-
 awx/lib/site-packages/celery/events/dumper.py | 44 +-
 .../site-packages/celery/events/snapshot.py | 21 +-
 awx/lib/site-packages/celery/events/state.py | 227 +-
 awx/lib/site-packages/celery/exceptions.py | 56 +-
 awx/lib/site-packages/celery/five.py | 387 +
 .../{tests/utilities => fixups}/__init__.py | 0
 awx/lib/site-packages/celery/fixups/django.py | 204 +
 .../site-packages/celery/loaders/__init__.py | 2 +
 awx/lib/site-packages/celery/loaders/app.py | 2 +
 awx/lib/site-packages/celery/loaders/base.py | 121 +-
 .../site-packages/celery/loaders/default.py | 10 +-
 awx/lib/site-packages/celery/local.py | 119 +-
 awx/lib/site-packages/celery/platforms.py | 224 +-
 awx/lib/site-packages/celery/result.py | 231 +-
 awx/lib/site-packages/celery/schedules.py | 222 +-
 .../site-packages/celery/security/__init__.py | 50 +-
 .../celery/security/certificate.py | 26 +-
 awx/lib/site-packages/celery/security/key.py | 11 +-
 .../celery/security/serialization.py | 56 +-
 .../site-packages/celery/security/utils.py | 11 +-
 awx/lib/site-packages/celery/signals.py | 44 +-
 awx/lib/site-packages/celery/states.py | 12 +
 awx/lib/site-packages/celery/task/__init__.py | 4 +-
 awx/lib/site-packages/celery/task/base.py | 79 +-
 awx/lib/site-packages/celery/task/http.py | 118 +-
 awx/lib/site-packages/celery/task/sets.py | 33 +-
 awx/lib/site-packages/celery/task/trace.py | 423 +-
 .../site-packages/celery/tests/__init__.py | 29 +-
 .../celery/tests/app/test_amqp.py | 142 +-
 .../celery/tests/app/test_annotations.py | 39 +-
 .../celery/tests/app/test_app.py | 418 +-
 .../celery/tests/app/test_beat.py | 206 +-
 .../celery/tests/app/test_builtins.py | 209 +-
 .../celery/tests/app/test_celery.py | 4 +-
 .../celery/tests/app/test_control.py | 106 +-
 .../celery/tests/app/test_defaults.py | 21 +-
 .../celery/tests/app/test_exceptions.py | 35 +
 .../celery/tests/app/test_loaders.py | 174 +-
 .../celery/tests/app/test_log.py | 273 +-
 .../celery/tests/app/test_registry.py | 78 +
 .../celery/tests/app/test_routes.py | 160 +-
 .../celery/tests/app/test_schedules.py | 717 ++
 .../celery/tests/app/test_utils.py | 25 +-
 .../celery/tests/backends/test_amqp.py | 168 +-
 .../celery/tests/backends/test_backends.py | 35 +-
 .../celery/tests/backends/test_base.py | 299 +-
 .../celery/tests/backends/test_cache.py | 90 +-
 .../celery/tests/backends/test_cassandra.py | 51 +-
 .../celery/tests/backends/test_couchbase.py | 136 +
 .../celery/tests/backends/test_database.py | 101 +-
 .../celery/tests/backends/test_mongodb.py | 107 +-
 .../celery/tests/backends/test_redis.py | 103 +-
 .../celery/tests/backends/test_rpc.py | 75 +
 .../celery/tests/bin/proj/__init__.py | 5 +
 .../celery/tests/bin/proj/app.py | 5 +
 .../bin/{test_camqadm.py => test_amqp.py} | 49 +-
 .../celery/tests/bin/test_base.py | 231 +-
 .../bin/{test_celerybeat.py => test_beat.py} | 86 +-
 .../celery/tests/bin/test_celery.py | 363 +-
 .../celery/tests/bin/test_celeryd_detach.py | 34 +-
 .../celery/tests/bin/test_celeryevdump.py | 35 +-
 .../bin/{test_celeryev.py => test_events.py} | 47 +-
 .../{test_celeryd_multi.py => test_multi.py} | 185 +-
 .../bin/{test_celeryd.py => test_worker.py} | 325 +-
 .../celery/tests/{utils.py => case.py} | 347 +-
 awx/lib/site-packages/celery/tests/compat.py | 85 -
 .../tests/compat_modules/test_compat.py | 82 +
 .../test_compat_utils.py} | 23 +-
 .../tests/compat_modules/test_decorators.py | 18 +-
 .../{tasks => compat_modules}/test_http.py | 42 +-
 .../tests/compat_modules/test_messaging.py | 12 +-
 .../{tasks => compat_modules}/test_sets.py | 144 +-
 .../tests/concurrency/test_concurrency.py | 33 +-
 .../celery/tests/concurrency/test_eventlet.py | 32 +-
 .../celery/tests/concurrency/test_gevent.py | 81 +-
 .../celery/tests/concurrency/test_pool.py | 15 +-
 .../celery/tests/concurrency/test_prefork.py | 320 +
 .../tests/concurrency/test_processes.py | 191 -
 .../celery/tests/concurrency/test_solo.py | 4 +-
 .../celery/tests/concurrency/test_threads.py | 7 +-
 awx/lib/site-packages/celery/tests/config.py | 54 -
 .../celery/tests/contrib/test_abortable.py | 50 +-
 .../celery/tests/contrib/test_methods.py | 34 +
 .../celery/tests/contrib/test_migrate.py | 233 +-
 .../celery/tests/contrib/test_rdb.py | 17 +-
 .../celery/tests/events/test_cursesmon.py | 10 +-
 .../celery/tests/events/test_events.py | 95 +-
 .../celery/tests/events/test_snapshot.py | 42 +-
 .../celery/tests/events/test_state.py | 191 +-
 .../tests/fixups}/__init__.py | 0
 .../celery/tests/fixups/test_django.py | 277 +
 .../celery/tests/functional/case.py | 28 +-
 .../celery/tests/functional/tasks.py | 4 +-
 .../celery/tests/security/__init__.py | 55 +-
 .../celery/tests/security/case.py | 8 +-
 .../celery/tests/security/test_certificate.py | 7 +-
 .../celery/tests/security/test_security.py | 86 +-
 .../tests/security/test_serialization.py | 13 +-
 .../celery/tests/slow/test_buckets.py | 346 -
 .../celery/tests/tasks/test_canvas.py | 258 +-
 .../celery/tests/tasks/test_chord.py | 210 +-
 .../celery/tests/tasks/test_context.py | 10 +-
 .../celery/tests/tasks/test_registry.py | 73 -
 .../celery/tests/tasks/test_result.py | 434 +-
 .../celery/tests/tasks/test_states.py | 2 +-
 .../celery/tests/tasks/test_tasks.py | 1280 +--
 .../celery/tests/tasks/test_trace.py | 182 +-
 .../celery/tests/utilities/test_info.py | 48 -
 .../tests/utilities/test_serialization.py | 19 -
 .../celery/tests/utilities/test_timeutils.py | 91 -
 .../celery/tests/utilities/test_utils.py | 165 -
 .../celery/tests/utils/__init__.py | 0
 .../test_datastructures.py | 248 +-
 .../{utilities => utils}/test_dispatcher.py | 17 +-
 .../{utilities => utils}/test_encoding.py | 5 +-
 .../celery/tests/utils/test_functional.py | 180 +
 .../{utilities => utils}/test_imports.py | 25 +-
 .../tests/{utilities => utils}/test_local.py | 81 +-
 .../tests/{utilities => utils}/test_mail.py | 27 +-
 .../tests/{utilities => utils}/test_pickle.py | 6 +-
 .../{utilities => utils}/test_platforms.py | 157 +-
 .../{utilities => utils}/test_saferef.py | 37 +-
 .../celery/tests/utils/test_serialization.py | 42 +
 .../celery/tests/utils/test_sysinfo.py | 33 +
 .../tests/{utilities => utils}/test_term.py | 40 +-
 .../celery/tests/utils/test_text.py | 88 +
 .../celery/tests/utils/test_threads.py | 107 +
 .../tests/{utilities => utils}/test_timer2.py | 67 +-
 .../celery/tests/utils/test_timeutils.py | 264 +
 .../celery/tests/utils/test_utils.py | 108 +
 .../celery/tests/worker/test_autoreload.py | 76 +-
 .../celery/tests/worker/test_autoscale.py | 71 +-
 .../celery/tests/worker/test_bootsteps.py | 339 +-
 .../celery/tests/worker/test_components.py | 38 +
 .../celery/tests/worker/test_consumer.py | 454 +
 .../celery/tests/worker/test_control.py | 446 +-
 .../celery/tests/worker/test_heartbeat.py | 35 +-
 .../celery/tests/worker/test_hub.py | 183 +-
 .../celery/tests/worker/test_loops.py | 403 +
 .../celery/tests/worker/test_mediator.py | 113 -
 .../celery/tests/worker/test_request.py | 982 ++-
 .../celery/tests/worker/test_revoke.py | 4 +-
 .../celery/tests/worker/test_state.py | 78 +-
 .../celery/tests/worker/test_strategy.py | 139 +
 .../celery/tests/worker/test_worker.py | 1018 ++-
 .../site-packages/celery/utils/__init__.py | 190 +-
 awx/lib/site-packages/celery/utils/compat.py | 173 +-
 awx/lib/site-packages/celery/utils/debug.py | 112 +-
 .../celery/utils/dispatch/__init__.py | 4 +-
 .../celery/utils/dispatch/saferef.py | 65 +-
 .../celery/utils/dispatch/signal.py | 22 +-
 .../site-packages/celery/utils/functional.py | 83 +-
 awx/lib/site-packages/celery/utils/imports.py | 15 +-
 awx/lib/site-packages/celery/utils/iso8601.py | 76 +
 awx/lib/site-packages/celery/utils/log.py | 116 +-
 awx/lib/site-packages/celery/utils/mail.py | 67 +-
 awx/lib/site-packages/celery/utils/objects.py | 37 +
 .../celery/utils/serialization.py | 85 +-
 awx/lib/site-packages/celery/utils/sysinfo.py | 45 +
 awx/lib/site-packages/celery/utils/term.py | 29 +-
 awx/lib/site-packages/celery/utils/text.py | 14 +-
 awx/lib/site-packages/celery/utils/threads.py | 65 +-
 awx/lib/site-packages/celery/utils/timer2.py | 264 +-
 .../site-packages/celery/utils/timeutils.py | 174 +-
 .../site-packages/celery/worker/__init__.py | 671 +-
 .../site-packages/celery/worker/autoreload.py | 59 +-
 .../site-packages/celery/worker/autoscale.py | 73 +-
 .../site-packages/celery/worker/bootsteps.py | 211 -
 .../site-packages/celery/worker/buckets.py | 391 -
 .../site-packages/celery/worker/components.py | 247 +
 .../site-packages/celery/worker/consumer.py | 1300 ++-
 .../site-packages/celery/worker/control.py | 349 +-
 .../site-packages/celery/worker/heartbeat.py | 17 +-
 awx/lib/site-packages/celery/worker/hub.py | 222 -
 awx/lib/site-packages/celery/worker/job.py | 246 +-
 awx/lib/site-packages/celery/worker/loops.py | 104 +
 .../site-packages/celery/worker/mediator.py | 80 -
 awx/lib/site-packages/celery/worker/pidbox.py | 114 +
 awx/lib/site-packages/celery/worker/state.py | 137 +-
 .../site-packages/celery/worker/strategy.py | 77 +-
 awx/lib/site-packages/dateutil/__init__.py | 2 +-
 awx/lib/site-packages/dateutil/parser.py | 40 +-
 .../dateutil/zoneinfo/__init__.py | 25 +-
 .../dateutil/zoneinfo/zoneinfo--latest.tar.gz | Bin 86784 -> 198578 bytes
 .../django_auth_ldap/__init__.py | 4 +-
 .../site-packages/django_auth_ldap/backend.py | 92 +-
 .../site-packages/django_auth_ldap/config.py | 76 +-
 awx/lib/site-packages/django_auth_ldap/dn.py | 1 +
 .../site-packages/django_auth_ldap/tests.py | 865 +-
 .../django_extensions/__init__.py | 2 +-
 .../django_extensions/admin/__init__.py | 2 +-
 .../django_extensions/admin/widgets.py | 2 -
 .../django_extensions/db/fields/__init__.py | 5 +
 .../django_extensions/db/fields/json.py | 22 +-
 .../jobs/minutely/__init__.py | 0
 .../django_extensions/management/color.py | 3 +
 .../management/commands/clean_pyc.py | 7 -
 .../management/commands/compile_pyc.py | 8 -
 .../management/commands/graph_models.py | 104 +-
 .../management/commands/pipchecker.py | 58 +-
 .../commands/print_user_for_session.py | 33 +-
 .../management/commands/reset_db.py | 118 +-
 .../management/commands/runjob.py | 8 -
 .../management/commands/runjobs.py | 8 -
 .../management/commands/runprofileserver.py | 31 +-
 .../management/commands/runscript.py | 19 +-
 .../management/commands/runserver_plus.py | 70 +-
 .../management/commands/sqldiff.py | 56 +-
 .../management/commands/sync_media_s3.py | 64 +-
 .../management/commands/sync_s3.py | 359 +
 .../management/commands/syncdata.py | 9 -
 .../django_extensions/management/jobs.py | 6 +-
 .../django_extensions/management/modelviz.py | 73 +-
 .../django_extensions/management/shells.py | 58 +
 .../django_extensions/management/utils.py | 18 +-
 .../css/jquery.autocomplete.css | 63 +-
 .../js/jquery.autocomplete.js | 1900 +++--
 .../static/django_extensions/js/jquery.js | 3558 --------
 .../django_extensions/graph_models/body.html | 34 -
 .../graph_models/digraph.dot | 26 +
 .../django_extensions/graph_models/head.html | 15 -
 .../django_extensions/graph_models/label.dot | 30 +
 .../django_extensions/graph_models/rel.html | 15 -
 .../graph_models/relation.dot | 10 +
 .../django_extensions/graph_models/tail.html | 1 -
 .../widgets/foreignkey_searchinput.html | 2 +-
 .../templatetags/indent_text.py | 55 +
 .../tests/test_dumpscript.py | 25 +-
 awx/lib/site-packages/djcelery/__init__.py | 9 +-
 awx/lib/site-packages/djcelery/admin.py | 45 +-
 awx/lib/site-packages/djcelery/admin_utils.py | 11 +-
 awx/lib/site-packages/djcelery/app.py | 2 +-
 .../site-packages/djcelery/backends/cache.py | 4 +-
 .../djcelery/backends/database.py | 7 +-
 awx/lib/site-packages/djcelery/common.py | 6 +-
 .../djcelery/contrib/test_runner.py | 73 +-
 awx/lib/site-packages/djcelery/humanize.py | 20 +-
 awx/lib/site-packages/djcelery/loaders.py | 19 +-
 .../site-packages/djcelery/management/base.py | 17 +-
 .../djcelery/management/commands/camqadm.py | 25 -
 .../djcelery/management/commands/celery.py | 4 +-
 .../management/commands/celerybeat.py | 6 +-
 .../djcelery/management/commands/celerycam.py | 6 +-
 .../djcelery/management/commands/celeryctl.py | 28 -
 .../djcelery/management/commands/celeryd.py | 6 +-
 .../management/commands/celeryd_detach.py | 2 +-
 .../management/commands/celeryd_multi.py | 12 +-
 .../djcelery/management/commands/celeryev.py | 42 -
 .../djcelery/management/commands/celerymon.py | 2 +-
 .../management/commands/djcelerymon.py | 6 +-
 awx/lib/site-packages/djcelery/managers.py | 9 +-
 awx/lib/site-packages/djcelery/models.py | 194 +-
 awx/lib/site-packages/djcelery/mon.py | 2 +-
 .../site-packages/djcelery/monproj/urls.py | 6 +-
 awx/lib/site-packages/djcelery/picklefield.py | 7 +-
 awx/lib/site-packages/djcelery/schedulers.py | 78 +-
 awx/lib/site-packages/djcelery/snapshot.py | 55 +-
 .../djcelery/static/djcelery/style.css | 4 +
 awx/lib/site-packages/djcelery/tests/req.py | 10 +-
 .../tests/test_backends/test_cache.py | 30 +-
 .../tests/test_backends/test_database.py | 13 +-
 .../djcelery/tests/test_discovery.py | 9 +-
 .../djcelery/tests/test_loaders.py | 5 +-
 .../djcelery/tests/test_models.py | 2 +-
 .../djcelery/tests/test_schedulers.py | 19 +-
 .../djcelery/tests/test_snapshot.py | 26 +-
 .../djcelery/tests/test_views.py | 18 +-
 .../djcelery/tests/test_worker_job.py | 9 +-
 awx/lib/site-packages/djcelery/tests/utils.py | 2 +-
 .../djcelery/transport/__init__.py | 2 +-
 awx/lib/site-packages/djcelery/urls.py | 4 +-
 awx/lib/site-packages/djcelery/utils.py | 10 +-
 awx/lib/site-packages/djcelery/views.py | 15 +-
 awx/lib/site-packages/fdpexpect.py | 82 -
 .../funtests/tests/test_multiprocessing.py | 17 +-
 awx/lib/site-packages/iso8601/__init__.py | 2 +-
 awx/lib/site-packages/iso8601/iso8601.py | 144 +-
 awx/lib/site-packages/iso8601/test_iso8601.py | 180 +-
 awx/lib/site-packages/jsonfield/VERSION | 2 +-
 awx/lib/site-packages/jsonfield/fields.py | 6 +-
 awx/lib/site-packages/jsonfield/forms.py | 3 +-
 .../jsonfield/templatetags/jsonify.py | 3 +-
 awx/lib/site-packages/jsonfield/widgets.py | 3 +-
 awx/lib/site-packages/keyring/backend.py | 6 +-
 .../site-packages/keyring/backends/Gnome.py | 3 +-
 .../site-packages/keyring/backends/Google.py | 16 +-
 .../site-packages/keyring/backends/OS_X.py | 5 +-
 .../keyring/backends/SecretService.py | 4 -
 .../site-packages/keyring/backends/Windows.py | 3 +-
 .../site-packages/keyring/backends/file.py | 17 +-
 .../site-packages/keyring/backends/kwallet.py | 3 +-
 .../site-packages/keyring/backends/multi.py | 2 +-
 awx/lib/site-packages/keyring/cli.py | 4 +-
 awx/lib/site-packages/keyring/core.py | 71 +-
 awx/lib/site-packages/keyring/credentials.py | 5 +-
 awx/lib/site-packages/keyring/errors.py | 4 +-
 .../site-packages/keyring/getpassbackend.py | 1 -
 awx/lib/site-packages/keyring/py27compat.py | 31 +-
 .../keyring/tests/backends/test_Gnome.py | 2 +-
 .../keyring/tests/backends/test_Google.py | 82 +-
 .../keyring/tests/backends/test_OS_X.py | 7 +-
 .../tests/backends/test_SecretService.py | 2 +-
 .../keyring/tests/backends/test_Windows.py | 12 +-
 .../keyring/tests/backends/test_file.py | 2 +-
 .../keyring/tests/backends/test_keyczar.py | 10 +-
 .../keyring/tests/backends/test_kwallet.py | 4 +-
 awx/lib/site-packages/keyring/tests/mocks.py | 17 +-
 .../keyring/tests/test_backend.py | 13 +-
 .../site-packages/keyring/tests/test_cli.py | 7 +-
 .../site-packages/keyring/tests/test_core.py | 2 +-
 .../site-packages/keyring/tests/test_util.py | 3 +-
 .../site-packages/keyring/util/properties.py | 4 +-
 awx/lib/site-packages/kombu/__init__.py | 15 +-
 awx/lib/site-packages/kombu/abstract.py | 9 +-
 awx/lib/site-packages/kombu/async/__init__.py | 15 +
 awx/lib/site-packages/kombu/async/hub.py | 384 +
 .../site-packages/kombu/async/semaphore.py | 108 +
 awx/lib/site-packages/kombu/async/timer.py | 227 +
 awx/lib/site-packages/kombu/clocks.py | 67 +-
 awx/lib/site-packages/kombu/common.py | 96 +-
 awx/lib/site-packages/kombu/compat.py | 3 +-
 awx/lib/site-packages/kombu/compression.py | 2 +-
 awx/lib/site-packages/kombu/connection.py | 229 +-
 awx/lib/site-packages/kombu/entity.py | 58 +-
 awx/lib/site-packages/kombu/exceptions.py | 17 +-
 awx/lib/site-packages/kombu/five.py | 206 +
 awx/lib/site-packages/kombu/log.py | 43 +-
 awx/lib/site-packages/kombu/message.py | 135 +
 awx/lib/site-packages/kombu/messaging.py | 44 +-
 awx/lib/site-packages/kombu/mixins.py | 57 +-
 awx/lib/site-packages/kombu/pidbox.py | 41 +-
 awx/lib/site-packages/kombu/pools.py | 12 +-
 awx/lib/site-packages/kombu/serialization.py | 124 +-
 awx/lib/site-packages/kombu/simple.py | 19 +-
 .../kombu/tests/async/__init__.py | 0
 .../kombu/tests/async/test_hub.py | 33 +
 .../kombu/tests/{utils.py => case.py} | 57 +-
 awx/lib/site-packages/kombu/tests/compat.py | 87 -
 awx/lib/site-packages/kombu/tests/mocks.py | 8 +-
 .../site-packages/kombu/tests/test_clocks.py | 54 +-
 .../site-packages/kombu/tests/test_common.py | 115 +-
 .../site-packages/kombu/tests/test_compat.py | 33 +-
 .../kombu/tests/test_compression.py | 7 +-
 .../kombu/tests/test_connection.py | 136 +-
 .../kombu/tests/test_entities.py | 53 +-
 awx/lib/site-packages/kombu/tests/test_log.py | 70 +-
 .../kombu/tests/test_messaging.py | 89 +-
 .../site-packages/kombu/tests/test_mixins.py | 241 +
 .../site-packages/kombu/tests/test_pidbox.py | 59 +-
 .../site-packages/kombu/tests/test_pools.py | 14 +-
 .../kombu/tests/test_serialization.py | 204 +-
 .../site-packages/kombu/tests/test_simple.py | 14 +-
 awx/lib/site-packages/kombu/tests/test_syn.py | 58 +
 .../kombu/tests/transport/test_amqplib.py | 7 +-
 .../kombu/tests/transport/test_base.py | 52 +-
 .../kombu/tests/transport/test_filesystem.py | 7 +-
 .../kombu/tests/transport/test_librabbitmq.py | 150 +
 .../kombu/tests/transport/test_memory.py | 28 +-
 .../kombu/tests/transport/test_mongodb.py | 6 +-
 .../kombu/tests/transport/test_pyamqp.py | 35 +-
 .../kombu/tests/transport/test_redis.py | 407 +-
 .../kombu/tests/transport/test_sqlalchemy.py | 42 +-
 .../kombu/tests/transport/test_transport.py | 31 +-
 .../tests/transport/virtual/test_base.py | 41 +-
 .../tests/transport/virtual/test_exchange.py | 6 +-
 .../transport/virtual/test_scheduling.py | 5 +-
 .../kombu/tests/utilities/test_functional.py | 55 -
 .../kombu/tests/utils/__init__.py | 0
 .../{utilities => utils}/test_amq_manager.py | 10 +-
 .../tests/{utilities => utils}/test_debug.py | 11 +-
 .../{utilities => utils}/test_encoding.py | 41 +-
 .../kombu/tests/utils/test_functional.py | 63 +
 .../kombu/tests/{ => utils}/test_utils.py | 62 +-
 awx/lib/site-packages/kombu/transport/SLMQ.py | 187 +
 awx/lib/site-packages/kombu/transport/SQS.py | 46 +-
 .../site-packages/kombu/transport/__init__.py | 31 +-
 .../site-packages/kombu/transport/amqplib.py | 43 +-
 awx/lib/site-packages/kombu/transport/base.py | 204 +-
 .../kombu/transport/beanstalk.py | 24 +-
 .../site-packages/kombu/transport/couchdb.py | 32 +-
 .../kombu/transport/django/__init__.py | 12 +-
 .../commands/clean_kombu_messages.py | 2 +-
 .../django/migrations/0001_initial.py | 6 +-
 .../kombu/transport/filesystem.py | 24 +-
 .../kombu/transport/librabbitmq.py | 37 +-
 .../site-packages/kombu/transport/memory.py | 16 +-
 .../site-packages/kombu/transport/mongodb.py | 77 +-
 .../site-packages/kombu/transport/pyamqp.py | 55 +-
 awx/lib/site-packages/kombu/transport/pyro.py | 99 +
 .../site-packages/kombu/transport/redis.py | 174 +-
 .../kombu/transport/sqlalchemy/__init__.py | 76 +-
 .../kombu/transport/sqlalchemy/models.py | 37 +-
 .../kombu/transport/virtual/__init__.py | 219 +-
 .../kombu/transport/virtual/exchange.py | 6 +-
 .../kombu/transport/virtual/scheduling.py | 4 +-
 awx/lib/site-packages/kombu/transport/zmq.py | 72 +-
 .../kombu/transport/zookeeper.py | 168 +-
 awx/lib/site-packages/kombu/utils/__init__.py | 71 +-
 .../site-packages/kombu/utils/amq_manager.py | 2 +-
 awx/lib/site-packages/kombu/utils/compat.py | 135 +-
 awx/lib/site-packages/kombu/utils/debug.py | 12 +-
 awx/lib/site-packages/kombu/utils/encoding.py | 61 +-
 awx/lib/site-packages/kombu/utils/eventio.py | 24 +-
 awx/lib/site-packages/kombu/utils/finalize.py | 85 -
 .../site-packages/kombu/utils/functional.py | 55 +-
 awx/lib/site-packages/kombu/utils/limits.py | 18 +-
 awx/lib/site-packages/kombu/utils/url.py | 11 +-
 awx/lib/site-packages/pbr/packaging.py | 122 +-
 awx/lib/site-packages/pbr/testr_command.py | 9 +-
 awx/lib/site-packages/pbr/tests/__init__.py | 133 -
 awx/lib/site-packages/pbr/tests/base.py | 133 +
 .../site-packages/pbr/tests/test_commands.py | 4 +-
 awx/lib/site-packages/pbr/tests/test_core.py | 6 +-
 awx/lib/site-packages/pbr/tests/test_files.py | 6 +-
 awx/lib/site-packages/pbr/tests/test_hooks.py | 11 +-
 .../site-packages/pbr/tests/test_packaging.py | 13 +-
 awx/lib/site-packages/pbr/tests/test_setup.py | 47 +-
 .../site-packages/pbr/tests/test_version.py | 4 +-
 awx/lib/site-packages/{ => pexpect}/ANSI.py | 159 +-
 awx/lib/site-packages/{ => pexpect}/FSM.py | 129 +-
 .../{pexpect.py => pexpect/__init__.py} | 1375 +--
 awx/lib/site-packages/pexpect/fdpexpect.py | 97 +
 awx/lib/site-packages/pexpect/psh.py | 148 +
 awx/lib/site-packages/{ => pexpect}/pxssh.py | 124 +-
 awx/lib/site-packages/{ => pexpect}/screen.py | 164 +-
 awx/lib/site-packages/pyrax/__init__.py | 38 +-
 awx/lib/site-packages/pyrax/autoscale.py | 446 +-
 awx/lib/site-packages/pyrax/base_identity.py | 17 +-
 .../site-packages/pyrax/cf_wrapper/client.py | 396 +-
 .../pyrax/cf_wrapper/container.py | 108 +-
 .../pyrax/cf_wrapper/storage_object.py | 23 +-
 awx/lib/site-packages/pyrax/client.py | 67 +-
 .../site-packages/pyrax/cloudblockstorage.py | 183 +-
 awx/lib/site-packages/pyrax/clouddatabases.py | 364 +-
 awx/lib/site-packages/pyrax/clouddns.py | 104 +-
 .../site-packages/pyrax/cloudloadbalancers.py | 92 +-
 .../site-packages/pyrax/cloudmonitoring.py | 56 +-
 awx/lib/site-packages/pyrax/cloudnetworks.py | 26 +-
 awx/lib/site-packages/pyrax/exceptions.py | 25 +-
 .../pyrax/identity/rax_identity.py | 43 +-
 awx/lib/site-packages/pyrax/manager.py | 50 +-
 awx/lib/site-packages/pyrax/queueing.py | 755 ++
 awx/lib/site-packages/pyrax/utils.py | 98 +-
 awx/lib/site-packages/pyrax/version.py | 2 +-
 awx/lib/site-packages/pytz/__init__.py | 10 +-
 awx/lib/site-packages/pytz/lazy.py | 72 +-
 .../pytz/zoneinfo/Africa/Casablanca | Bin 1558 -> 1679 bytes
 .../pytz/zoneinfo/Africa/El_Aaiun | Bin 194 -> 1509 bytes
 .../site-packages/pytz/zoneinfo/Africa/Juba | Bin 669 -> 669 bytes
 .../pytz/zoneinfo/Africa/Tripoli | Bin 1350 -> 641 bytes
 .../pytz/zoneinfo/America/Anguilla | Bin 156 -> 156 bytes
 .../pytz/zoneinfo/America/Araguaina | Bin 1609 -> 882 bytes
 .../pytz/zoneinfo/America/Argentina/San_Luis | Bin 1125 -> 1145 bytes
 .../site-packages/pytz/zoneinfo/America/Aruba | Bin 194 -> 194 bytes
 .../pytz/zoneinfo/America/Cayman | Bin 177 -> 177 bytes
 .../pytz/zoneinfo/America/Dominica | Bin 156 -> 156 bytes
 .../pytz/zoneinfo/America/Eirunepe | Bin 640 -> 670 bytes
 .../pytz/zoneinfo/America/Godthab | Bin 8296 -> 1863 bytes
 .../pytz/zoneinfo/America/Grand_Turk | Bin 1871 -> 1871 bytes
 .../pytz/zoneinfo/America/Grenada | Bin 156 -> 156 bytes
 .../pytz/zoneinfo/America/Guadeloupe | Bin 156 -> 156 bytes
 .../pytz/zoneinfo/America/Jamaica | Bin 481 -> 481 bytes
 .../pytz/zoneinfo/America/Marigot | Bin 156 -> 156 bytes
 .../pytz/zoneinfo/America/Montserrat | Bin 156 -> 156 bytes
 .../pytz/zoneinfo/America/Porto_Acre | Bin 612 -> 642 bytes
 .../pytz/zoneinfo/America/Rio_Branco | Bin 612 -> 642 bytes
 .../pytz/zoneinfo/America/Santiago | Bin 9227 -> 2505 bytes
 .../pytz/zoneinfo/America/St_Barthelemy | Bin 156 -> 156 bytes
 .../pytz/zoneinfo/America/St_Kitts | Bin 156 -> 156 bytes
 .../pytz/zoneinfo/America/St_Lucia | Bin 177 -> 156 bytes
 .../pytz/zoneinfo/America/St_Thomas | Bin 156 -> 156 bytes
 .../pytz/zoneinfo/America/St_Vincent | Bin 177 -> 156 bytes
 .../pytz/zoneinfo/America/Tortola | Bin 156 -> 156 bytes
 .../pytz/zoneinfo/America/Virgin | Bin 156 -> 156 bytes
 .../pytz/zoneinfo/Antarctica/McMurdo | Bin 2001 -> 2434 bytes
 .../pytz/zoneinfo/Antarctica/Palmer | Bin 8762 -> 2040 bytes
 .../pytz/zoneinfo/Antarctica/South_Pole | Bin 2001 -> 2434 bytes
 .../site-packages/pytz/zoneinfo/Asia/Amman | Bin 1863 -> 1191 bytes
 awx/lib/site-packages/pytz/zoneinfo/Asia/Dili | Bin 293 -> 295 bytes
 awx/lib/site-packages/pytz/zoneinfo/Asia/Gaza | Bin 9025 -> 2287 bytes
 .../site-packages/pytz/zoneinfo/Asia/Hebron | Bin 9053 -> 2315 bytes
 .../site-packages/pytz/zoneinfo/Asia/Jakarta | Bin 344 -> 344 bytes
 .../site-packages/pytz/zoneinfo/Asia/Jayapura | Bin 225 -> 225 bytes
 .../pytz/zoneinfo/Asia/Jerusalem | Bin 8981 -> 2239 bytes
 .../site-packages/pytz/zoneinfo/Asia/Makassar | Bin 263 -> 266 bytes
 .../pytz/zoneinfo/Asia/Pontianak | Bin 359 -> 361 bytes
 .../site-packages/pytz/zoneinfo/Asia/Tehran | Bin 1638 -> 1647 bytes
 .../site-packages/pytz/zoneinfo/Asia/Tel_Aviv | Bin 8981 -> 2239 bytes
 .../pytz/zoneinfo/Asia/Ujung_Pandang | Bin 263 -> 266 bytes
 .../site-packages/pytz/zoneinfo/Brazil/Acre | Bin 612 -> 642 bytes
 .../pytz/zoneinfo/Chile/Continental | Bin 9227 -> 2505 bytes
 .../pytz/zoneinfo/Chile/EasterIsland | Bin 8989 -> 2269 bytes
 .../pytz/zoneinfo/Europe/Busingen | Bin 1892 -> 1892 bytes
 .../site-packages/pytz/zoneinfo/Europe/Vaduz | Bin 1799 -> 1892 bytes
 .../site-packages/pytz/zoneinfo/Europe/Zurich | Bin 1892 -> 1892 bytes
 awx/lib/site-packages/pytz/zoneinfo/Iran | Bin 1638 -> 1647 bytes
 awx/lib/site-packages/pytz/zoneinfo/Israel | Bin 8981 -> 2239 bytes
 awx/lib/site-packages/pytz/zoneinfo/Jamaica | Bin 481 -> 481 bytes
 awx/lib/site-packages/pytz/zoneinfo/Libya | Bin 1350 -> 641 bytes
 .../pytz/zoneinfo/Pacific/Easter | Bin 8989 -> 2269 bytes
 .../site-packages/pytz/zoneinfo/Pacific/Fiji | Bin 7782 -> 1064 bytes
 .../pytz/zoneinfo/Pacific/Johnston | Bin 119 -> 250 bytes
 .../site-packages/pytz/zoneinfo/iso3166.tab | 6 +-
 awx/lib/site-packages/pytz/zoneinfo/zone.tab | 32 +-
 .../rackspace_auth_openstack/plugin.py | 4 +-
 awx/lib/site-packages/requests/__init__.py | 6 +-
 awx/lib/site-packages/requests/adapters.py | 15 +-
 awx/lib/site-packages/requests/cacert.pem | 7448 ++++++++++-------
 awx/lib/site-packages/requests/cookies.py | 10 +-
 awx/lib/site-packages/requests/exceptions.py | 4 +
 awx/lib/site-packages/requests/models.py | 25 +-
 .../requests/packages/charade/__init__.py | 34 +
 .../requests/packages/charade/__main__.py | 7 +
 .../requests/packages/charade/jpcntx.py | 2 +-
 .../requests/packages/charade/latin1prober.py | 2 +-
 .../packages/charade/universaldetector.py | 12 +-
 .../requests/packages/urllib3/connection.py | 107 +
 .../packages/urllib3/connectionpool.py | 205 +-
 .../packages/urllib3/contrib/pyopenssl.py | 2 +
 .../requests/packages/urllib3/util.py | 39 +-
 awx/lib/site-packages/requests/sessions.py | 14 +-
 awx/lib/site-packages/requests/utils.py | 15 +-
 .../setuptools/command/egg_info.py | 37 +-
 .../site-packages/setuptools/command/sdist.py | 116 +-
 awx/lib/site-packages/setuptools/compat.py | 2 +
 .../setuptools/script template (dev).py | 8 +-
 .../site-packages/setuptools/ssl_support.py | 86 +-
 awx/lib/site-packages/setuptools/svn_utils.py | 529 ++
 .../setuptools/tests/environment.py | 104 +
 .../setuptools/tests/test_egg_info.py | 34 +
 .../setuptools/tests/test_sdist.py | 145 +-
 .../setuptools/tests/test_svn.py | 223 +
 awx/lib/site-packages/setuptools/version.py | 2 +-
 awx/lib/site-packages/simplejson/__init__.py | 2 +-
 awx/lib/site-packages/simplejson/scanner.py | 3 +
 .../simplejson/tests/test_errors.py | 18 +-
 awx/lib/site-packages/south/__init__.py | 2 +-
 awx/lib/site-packages/south/db/firebird.py | 29 +-
 awx/lib/site-packages/south/db/mysql.py | 23 +-
 awx/lib/site-packages/south/db/oracle.py | 55 +-
 awx/lib/site-packages/south/db/sqlite3.py | 37 +-
 awx/lib/site-packages/south/exceptions.py | 7 +-
 .../site-packages/south/hacks/django_1_0.py | 9 +-
 .../management/commands/datamigration.py | 2 +-
 .../management/commands/schemamigration.py | 2 +-
 .../site-packages/south/migration/__init__.py | 6 +-
 .../south/migration/migrators.py | 21 +-
 .../site-packages/south/modelsinspector.py | 1 +
 awx/lib/site-packages/south/signals.py | 6 +-
 awx/lib/site-packages/south/test_shim.py | 6 +
 awx/lib/site-packages/south/tests/__init__.py | 1 +
 .../south/tests/autodetection.py | 9 +-
 awx/lib/site-packages/south/tests/db.py | 33 +-
 .../site-packages/south/tests/db_firebird.py | 39 +
 awx/lib/site-packages/south/v2.py | 8 +-
 awx/lib/site-packages/swiftclient/__init__.py | 2 +-
 awx/lib/site-packages/swiftclient/version.py | 15 +-
 ...jango-1.5.4.tar.gz => Django-1.5.5.tar.gz} | Bin 8050758 -> 8060441 bytes
 requirements/South-0.8.2.tar.gz | Bin 95435 -> 0 bytes
 requirements/South-0.8.3.tar.gz | Bin 0 -> 97650 bytes
 requirements/amqp-1.2.1.tar.gz | Bin 74050 -> 0 bytes
 requirements/amqp-1.3.3.tar.gz | Bin 0 -> 75574 bytes
 requirements/astroid-1.0.0.tar.gz | Bin 111108 -> 0 bytes
 requirements/astroid-1.0.1.tar.gz | Bin 0 -> 112597 bytes
 requirements/astroid-5ed6266.tar.gz | Bin 113855 -> 0 bytes
 requirements/billiard-2.7.3.32.tar.gz | Bin 134769 -> 0 bytes
 requirements/billiard-3.3.0.6.tar.gz | Bin 0 -> 146649 bytes
 requirements/boto-2.13.3.tar.gz | Bin 1063143 -> 0 bytes
 requirements/boto-2.17.0.tar.gz | Bin 0 -> 5993878 bytes
 requirements/celery-3.0.23.tar.gz | Bin 1096420 -> 0 bytes
 requirements/celery-3.1.3.tar.gz | Bin 0 -> 1269548 bytes
 requirements/coverage-3.6.tar.gz | Bin 232067 -> 0 bytes
 requirements/coverage-3.7.tar.gz | Bin 0 -> 283419 bytes
 requirements/dev.txt | 1 -
 requirements/dev_local.txt | 60 +-
 requirements/distribute-0.6.45.tar.gz | Bin 723456 -> 0 bytes
 requirements/django-auth-ldap-1.1.4.tar.gz | Bin 39252 -> 0 bytes
 requirements/django-auth-ldap-1.1.6.tar.gz | Bin 0 -> 39321 bytes
 requirements/django-celery-3.0.23.tar.gz | Bin 78103 -> 0 bytes
 requirements/django-celery-3.1.1.tar.gz | Bin 0 -> 75014 bytes
 .../django-debug-toolbar-0.11.0.tar.gz | Bin 0 -> 206083 bytes
 .../django-debug-toolbar-0.9.4.tar.gz | Bin 150062 -> 0 bytes
 requirements/django-devserver-0.6.2.tar.gz | Bin 16598 -> 0 bytes
 requirements/django-devserver-0.7.0.tar.gz | Bin 0 -> 16917 bytes
 requirements/django-extensions-1.2.2.tar.gz | Bin 309337 -> 0 bytes
 requirements/django-extensions-1.2.5.tar.gz | Bin 0 -> 285378 bytes
 requirements/django-jsonfield-0.9.10.tar.gz | Bin 10990 -> 0 bytes
 requirements/django-jsonfield-0.9.11.tar.gz | Bin 0 -> 10882 bytes
 requirements/iso8601-0.1.4.tar.gz | Bin 4685 -> 0 bytes
 requirements/iso8601-0.1.8.tar.gz | Bin 0 -> 7775 bytes
 requirements/keyring-3.0.5.zip | Bin 84181 -> 0 bytes
 requirements/keyring-3.2.zip | Bin 0 -> 85773 bytes
 requirements/kombu-2.5.14.tar.gz | Bin 307139 -> 0 bytes
 requirements/kombu-3.0.4.tar.gz | Bin 0 -> 329216 bytes
 requirements/pbr-0.5.21.tar.gz | Bin 123902 -> 0 bytes
 requirements/pbr-0.5.23.tar.gz | Bin 0 -> 128773 bytes
 requirements/pexpect-2.4.tar.gz | Bin 113251 -> 0 bytes
 requirements/pexpect-3.0.tar.gz | Bin 0 -> 146662 bytes
 requirements/prod_local.txt | 46 +-
 requirements/pyrax-1.5.0.tar.gz | Bin 814136 -> 0 bytes
 requirements/pyrax-1.6.2.tar.gz | Bin 0 -> 260945 bytes
 requirements/python-dateutil-2.1.tar.gz | Bin 152334 -> 0 bytes
 requirements/python-dateutil-2.2.tar.gz | Bin 0 -> 259085 bytes
 requirements/python-swiftclient-1.6.0.tar.gz | Bin 71606 -> 0 bytes
 requirements/python-swiftclient-1.8.0.tar.gz | Bin 0 -> 72672 bytes
 requirements/pytz-2013.8.tar.bz2 | Bin 0 -> 177290 bytes
 requirements/pytz-2013d.tar.bz2 | Bin 204865 -> 0 bytes
 .../rackspace-auth-openstack-1.0.tar.gz | Bin 6583 -> 0 bytes
 .../rackspace-auth-openstack-1.1.tar.gz | Bin 0 -> 6567 bytes
 requirements/requests-2.0.0.tar.gz | Bin 362994 -> 0 bytes
 requirements/requests-2.0.1.tar.gz | Bin 0 -> 412648 bytes
 requirements/setuptools-1.1.6.tar.gz | Bin 681597 -> 0 bytes
 requirements/setuptools-1.3.2.tar.gz | Bin 0 -> 789412 bytes
 requirements/simplejson-3.3.0.tar.gz | Bin 67250 -> 0 bytes
 requirements/simplejson-3.3.1.tar.gz | Bin 0 -> 67371 bytes
 requirements/sqlparse-0.1.10.tar.gz | Bin 0 -> 53322 bytes
 tox.ini | 24 +-
 767 files changed, 45175 insertions(+), 28364 deletions(-)
 create mode 100644 awx/lib/site-packages/amqp/protocol.py
 create mode 100644 awx/lib/site-packages/billiard/_connection3.py
 create mode 100644 awx/lib/site-packages/billiard/_reduction.py
 create mode 100644 awx/lib/site-packages/billiard/_reduction3.py
 create mode 100644 awx/lib/site-packages/billiard/five.py
 create mode 100644 awx/lib/site-packages/boto/cloudtrail/__init__.py
 create mode 100644 awx/lib/site-packages/boto/cloudtrail/exceptions.py
 create mode 100644 awx/lib/site-packages/boto/cloudtrail/layer1.py
 create mode 100644 awx/lib/site-packages/boto/vpc/networkacl.py
 delete mode 100644 awx/lib/site-packages/celery/__compat__.py
 delete mode 100644 awx/lib/site-packages/celery/app/abstract.py
 create mode 100644 awx/lib/site-packages/celery/app/trace.py
 create mode 100644 awx/lib/site-packages/celery/backends/couchbase.py
 delete mode 100644 awx/lib/site-packages/celery/backends/database/a805d4bd.py
 delete mode 100644 awx/lib/site-packages/celery/backends/database/dfd042c7.py
 create mode 100644 awx/lib/site-packages/celery/backends/rpc.py
 rename awx/lib/site-packages/celery/bin/{camqadm.py => amqp.py} (80%)
 rename awx/lib/site-packages/celery/bin/{celerybeat.py => beat.py} (67%)
 delete mode 100644 awx/lib/site-packages/celery/bin/celeryctl.py
 rename awx/lib/site-packages/celery/bin/{celeryev.py => events.py} (70%)
 create mode 100644 awx/lib/site-packages/celery/bin/graph.py
 rename awx/lib/site-packages/celery/bin/{celeryd_multi.py => multi.py} (63%)
 rename awx/lib/site-packages/celery/bin/{celeryd.py => worker.py} (62%)
 create mode 100644 awx/lib/site-packages/celery/bootsteps.py
 create mode 100644 awx/lib/site-packages/celery/concurrency/asynpool.py
 create mode 100644 awx/lib/site-packages/celery/concurrency/prefork.py
 delete mode 100644 awx/lib/site-packages/celery/concurrency/processes/__init__.py
 delete mode 100644 awx/lib/site-packages/celery/contrib/bundles.py
 create mode 100644 awx/lib/site-packages/celery/five.py
 rename awx/lib/site-packages/celery/{tests/utilities => fixups}/__init__.py (100%)
 create mode 100644 awx/lib/site-packages/celery/fixups/django.py
 create mode 100644 awx/lib/site-packages/celery/tests/app/test_exceptions.py
 create mode 100644 awx/lib/site-packages/celery/tests/app/test_registry.py
 create mode 100644 awx/lib/site-packages/celery/tests/app/test_schedules.py
 create mode 100644 awx/lib/site-packages/celery/tests/backends/test_couchbase.py
 create mode 100644 awx/lib/site-packages/celery/tests/backends/test_rpc.py
 create mode 100644 awx/lib/site-packages/celery/tests/bin/proj/__init__.py
 create mode 100644 awx/lib/site-packages/celery/tests/bin/proj/app.py
 rename awx/lib/site-packages/celery/tests/bin/{test_camqadm.py => test_amqp.py} (80%)
 rename awx/lib/site-packages/celery/tests/bin/{test_celerybeat.py => test_beat.py} (61%)
 rename awx/lib/site-packages/celery/tests/bin/{test_celeryev.py => test_events.py} (53%)
 rename awx/lib/site-packages/celery/tests/bin/{test_celeryd_multi.py => test_multi.py} (74%)
 rename awx/lib/site-packages/celery/tests/bin/{test_celeryd.py => test_worker.py} (71%)
 rename awx/lib/site-packages/celery/tests/{utils.py => case.py} (60%)
 delete mode 100644 awx/lib/site-packages/celery/tests/compat.py
 create mode 100644 awx/lib/site-packages/celery/tests/compat_modules/test_compat.py
 rename awx/lib/site-packages/celery/tests/{utilities/test_compat.py => compat_modules/test_compat_utils.py} (69%)
 rename awx/lib/site-packages/celery/tests/{tasks => compat_modules}/test_http.py (81%)
 rename awx/lib/site-packages/celery/tests/{tasks => compat_modules}/test_sets.py (55%)
 create mode 100644 awx/lib/site-packages/celery/tests/concurrency/test_prefork.py
 delete mode 100644 awx/lib/site-packages/celery/tests/concurrency/test_processes.py
 delete mode 100644 awx/lib/site-packages/celery/tests/config.py
 create mode 100644 awx/lib/site-packages/celery/tests/contrib/test_methods.py
 rename awx/lib/site-packages/{kombu/tests/utilities => celery/tests/fixups}/__init__.py (100%)
 create mode 100644 awx/lib/site-packages/celery/tests/fixups/test_django.py
 delete mode 100644 awx/lib/site-packages/celery/tests/slow/test_buckets.py
 delete mode 100644 awx/lib/site-packages/celery/tests/tasks/test_registry.py
 delete mode 100644 awx/lib/site-packages/celery/tests/utilities/test_info.py
 delete mode 100644 awx/lib/site-packages/celery/tests/utilities/test_serialization.py
 delete mode 100644 awx/lib/site-packages/celery/tests/utilities/test_timeutils.py
 delete mode 100644 awx/lib/site-packages/celery/tests/utilities/test_utils.py
 create mode 100644 awx/lib/site-packages/celery/tests/utils/__init__.py
 rename awx/lib/site-packages/celery/tests/{utilities => utils}/test_datastructures.py (55%)
 rename awx/lib/site-packages/celery/tests/{utilities => utils}/test_dispatcher.py (92%)
 rename awx/lib/site-packages/celery/tests/{utilities => utils}/test_encoding.py (77%)
 create mode 100644 awx/lib/site-packages/celery/tests/utils/test_functional.py
 rename awx/lib/site-packages/celery/tests/{utilities => utils}/test_imports.py (61%)
 rename awx/lib/site-packages/celery/tests/{utilities => utils}/test_local.py (80%)
 rename awx/lib/site-packages/celery/tests/{utilities => utils}/test_mail.py (66%)
 rename awx/lib/site-packages/celery/tests/{utilities => utils}/test_pickle.py (92%)
 rename awx/lib/site-packages/celery/tests/{utilities => utils}/test_platforms.py (82%)
 rename awx/lib/site-packages/celery/tests/{utilities => utils}/test_saferef.py (68%)
 create mode 100644 awx/lib/site-packages/celery/tests/utils/test_serialization.py
 create mode 100644 awx/lib/site-packages/celery/tests/utils/test_sysinfo.py
 rename awx/lib/site-packages/celery/tests/{utilities => utils}/test_term.py (65%)
 create mode 100644 awx/lib/site-packages/celery/tests/utils/test_text.py
 create mode 100644 awx/lib/site-packages/celery/tests/utils/test_threads.py
 rename awx/lib/site-packages/celery/tests/{utilities => utils}/test_timer2.py (73%)
 create mode 100644 awx/lib/site-packages/celery/tests/utils/test_timeutils.py
 create mode 100644 awx/lib/site-packages/celery/tests/utils/test_utils.py
 create mode 100644 awx/lib/site-packages/celery/tests/worker/test_components.py
 create mode 100644 awx/lib/site-packages/celery/tests/worker/test_consumer.py
 create mode 100644 awx/lib/site-packages/celery/tests/worker/test_loops.py
 delete mode 100644 awx/lib/site-packages/celery/tests/worker/test_mediator.py
 create mode 100644 awx/lib/site-packages/celery/tests/worker/test_strategy.py
 create mode 100644 awx/lib/site-packages/celery/utils/iso8601.py
 create mode 100644 awx/lib/site-packages/celery/utils/objects.py
 create mode 100644 awx/lib/site-packages/celery/utils/sysinfo.py
 delete mode 100644 awx/lib/site-packages/celery/worker/bootsteps.py
 delete mode 100644 awx/lib/site-packages/celery/worker/buckets.py
 create mode 100644 awx/lib/site-packages/celery/worker/components.py
 delete mode 100644 awx/lib/site-packages/celery/worker/hub.py
 create mode 100644 awx/lib/site-packages/celery/worker/loops.py
 delete mode 100644 awx/lib/site-packages/celery/worker/mediator.py
 create mode 100644 awx/lib/site-packages/celery/worker/pidbox.py
 create mode 100644 awx/lib/site-packages/django_extensions/jobs/minutely/__init__.py
 create mode 100644 awx/lib/site-packages/django_extensions/management/commands/sync_s3.py
 delete mode 100644 awx/lib/site-packages/django_extensions/static/django_extensions/js/jquery.js
 delete mode 100644 awx/lib/site-packages/django_extensions/templates/django_extensions/graph_models/body.html
 create mode 100644 awx/lib/site-packages/django_extensions/templates/django_extensions/graph_models/digraph.dot
 delete mode 100644 awx/lib/site-packages/django_extensions/templates/django_extensions/graph_models/head.html
 create mode 100644 awx/lib/site-packages/django_extensions/templates/django_extensions/graph_models/label.dot
 delete mode 100644 awx/lib/site-packages/django_extensions/templates/django_extensions/graph_models/rel.html
 create mode 100644 awx/lib/site-packages/django_extensions/templates/django_extensions/graph_models/relation.dot
 delete mode 100644 awx/lib/site-packages/django_extensions/templates/django_extensions/graph_models/tail.html
 create mode 100644 awx/lib/site-packages/django_extensions/templatetags/indent_text.py
 delete mode 100644 awx/lib/site-packages/djcelery/management/commands/camqadm.py
 delete mode 100644 awx/lib/site-packages/djcelery/management/commands/celeryctl.py
 delete mode 100644 awx/lib/site-packages/djcelery/management/commands/celeryev.py
 create mode 100644 awx/lib/site-packages/djcelery/static/djcelery/style.css
 delete mode 100644 awx/lib/site-packages/fdpexpect.py
 create mode 100644 awx/lib/site-packages/kombu/async/__init__.py
 create mode 100644 awx/lib/site-packages/kombu/async/hub.py
 create mode 100644 awx/lib/site-packages/kombu/async/semaphore.py
 create mode 100644 awx/lib/site-packages/kombu/async/timer.py
 create mode 100644 awx/lib/site-packages/kombu/five.py
 create mode 100644 awx/lib/site-packages/kombu/message.py
 create mode 100644 awx/lib/site-packages/kombu/tests/async/__init__.py
 create mode 100644 awx/lib/site-packages/kombu/tests/async/test_hub.py
 rename awx/lib/site-packages/kombu/tests/{utils.py => case.py} (70%)
 delete mode 100644 awx/lib/site-packages/kombu/tests/compat.py
 create mode 100644 awx/lib/site-packages/kombu/tests/test_mixins.py
 create mode 100644 awx/lib/site-packages/kombu/tests/test_syn.py
 create mode 100644 awx/lib/site-packages/kombu/tests/transport/test_librabbitmq.py
 delete mode 100644 awx/lib/site-packages/kombu/tests/utilities/test_functional.py
 create mode 100644 awx/lib/site-packages/kombu/tests/utils/__init__.py
 rename awx/lib/site-packages/kombu/tests/{utilities => utils}/test_amq_manager.py (83%)
 rename awx/lib/site-packages/kombu/tests/{utilities => utils}/test_debug.py (89%)
 rename awx/lib/site-packages/kombu/tests/{utilities => utils}/test_encoding.py (65%)
 create mode 100644 awx/lib/site-packages/kombu/tests/utils/test_functional.py
 rename awx/lib/site-packages/kombu/tests/{ => utils}/test_utils.py (88%)
 create mode 100644 awx/lib/site-packages/kombu/transport/SLMQ.py
 create mode 100644 awx/lib/site-packages/kombu/transport/pyro.py
 delete mode 100644 awx/lib/site-packages/kombu/utils/finalize.py
 create mode 100644 awx/lib/site-packages/pbr/tests/base.py
 rename awx/lib/site-packages/{ => pexpect}/ANSI.py (68%)
 rename awx/lib/site-packages/{ => pexpect}/FSM.py (79%)
 rename awx/lib/site-packages/{pexpect.py => pexpect/__init__.py} (56%)
 create mode 100644 awx/lib/site-packages/pexpect/fdpexpect.py
 create mode 100644 awx/lib/site-packages/pexpect/psh.py
 rename awx/lib/site-packages/{ => pexpect}/pxssh.py (78%)
 rename awx/lib/site-packages/{ => pexpect}/screen.py (70%)
 create mode 100644 awx/lib/site-packages/pyrax/queueing.py
 create mode 100644 awx/lib/site-packages/requests/packages/charade/__main__.py
 create mode 100644 awx/lib/site-packages/requests/packages/urllib3/connection.py
 create mode 100644 awx/lib/site-packages/setuptools/svn_utils.py
 create mode 100644 awx/lib/site-packages/setuptools/tests/environment.py
 create mode 100644 awx/lib/site-packages/setuptools/tests/test_svn.py
 create mode 100644 awx/lib/site-packages/south/test_shim.py
 create mode 100644 awx/lib/site-packages/south/tests/db_firebird.py
 rename requirements/{Django-1.5.4.tar.gz => Django-1.5.5.tar.gz} (52%)
 delete mode 100644 requirements/South-0.8.2.tar.gz
 create mode 100644 requirements/South-0.8.3.tar.gz
 delete mode 100644 requirements/amqp-1.2.1.tar.gz
 create mode 100644 requirements/amqp-1.3.3.tar.gz
 delete mode 100644 requirements/astroid-1.0.0.tar.gz
 create mode 100644 requirements/astroid-1.0.1.tar.gz
 delete mode 100644 requirements/astroid-5ed6266.tar.gz
 delete mode 100644 requirements/billiard-2.7.3.32.tar.gz
 create mode 100644 requirements/billiard-3.3.0.6.tar.gz
 delete mode 100644 requirements/boto-2.13.3.tar.gz
 create mode 100644 requirements/boto-2.17.0.tar.gz
 delete mode 100644 requirements/celery-3.0.23.tar.gz
 create mode 100644 requirements/celery-3.1.3.tar.gz
 delete mode 100644 requirements/coverage-3.6.tar.gz
 create mode 100644 requirements/coverage-3.7.tar.gz
 delete mode 100644 requirements/distribute-0.6.45.tar.gz
 delete mode 100644 requirements/django-auth-ldap-1.1.4.tar.gz
 create mode 100644 requirements/django-auth-ldap-1.1.6.tar.gz
 delete mode 100644 requirements/django-celery-3.0.23.tar.gz
 create mode 100644 requirements/django-celery-3.1.1.tar.gz
 create mode 100644 requirements/django-debug-toolbar-0.11.0.tar.gz
 delete mode 100644 requirements/django-debug-toolbar-0.9.4.tar.gz
 delete mode 100644 requirements/django-devserver-0.6.2.tar.gz
 create mode 100644 requirements/django-devserver-0.7.0.tar.gz
 delete mode 100644 requirements/django-extensions-1.2.2.tar.gz
 create mode 100644 requirements/django-extensions-1.2.5.tar.gz
 delete mode 100644 requirements/django-jsonfield-0.9.10.tar.gz
 create mode 100644 requirements/django-jsonfield-0.9.11.tar.gz
 delete mode 100644 requirements/iso8601-0.1.4.tar.gz
 create mode 100644 requirements/iso8601-0.1.8.tar.gz
 delete mode 100644 requirements/keyring-3.0.5.zip
 create mode 100644 requirements/keyring-3.2.zip
 delete mode 100644 requirements/kombu-2.5.14.tar.gz
 create mode 100644 requirements/kombu-3.0.4.tar.gz
 delete mode 100644 requirements/pbr-0.5.21.tar.gz
 create mode 100644 requirements/pbr-0.5.23.tar.gz
 delete mode 100644 requirements/pexpect-2.4.tar.gz
 create mode 100644 requirements/pexpect-3.0.tar.gz
 delete mode 100644 requirements/pyrax-1.5.0.tar.gz
 create mode 100644 requirements/pyrax-1.6.2.tar.gz
 delete mode 100644 requirements/python-dateutil-2.1.tar.gz
 create mode 100644 requirements/python-dateutil-2.2.tar.gz
 delete mode 100644 requirements/python-swiftclient-1.6.0.tar.gz
 create mode 100644 requirements/python-swiftclient-1.8.0.tar.gz
 create mode 100644 requirements/pytz-2013.8.tar.bz2
 delete mode 100644 requirements/pytz-2013d.tar.bz2
 delete mode 100644 requirements/rackspace-auth-openstack-1.0.tar.gz
 create mode 100644 requirements/rackspace-auth-openstack-1.1.tar.gz
 delete mode 100644 requirements/requests-2.0.0.tar.gz
 create mode 100644 requirements/requests-2.0.1.tar.gz
 delete mode 100644 requirements/setuptools-1.1.6.tar.gz
 create mode 100644 requirements/setuptools-1.3.2.tar.gz
 delete mode 100644 requirements/simplejson-3.3.0.tar.gz
 create mode 100644 requirements/simplejson-3.3.1.tar.gz
 create mode 100644 requirements/sqlparse-0.1.10.tar.gz

diff --git a/awx/lib/site-packages/README b/awx/lib/site-packages/README
index 9364a4ce64..6ea4b6aa9d 100644
--- a/awx/lib/site-packages/README
+++ b/awx/lib/site-packages/README
@@ -1,50 +1,51 @@
 Local versions of third-party packages required by AWX. Package names and
 versions are listed below, along with notes on which files are included.
 
-amqp==1.2.1 (amqp/*)
+amqp==1.3.3 (amqp/*)
 anyjson==0.3.3 (anyjson/*)
 argparse==1.2.1 (argparse.py, needed for Python 2.6 support)
 Babel==1.3 (babel/*, excluded bin/pybabel)
-billiard==2.7.3.32 (billiard/*, funtests/*, excluded _billiard.so)
-boto==2.13.3 (boto/*, excluded bin/asadmin, bin/bundle_image, bin/cfadmin,
+billiard==3.3.0.6 (billiard/*, funtests/*, excluded _billiard.so)
+boto==2.17.0 (boto/*, excluded bin/asadmin, bin/bundle_image, bin/cfadmin,
     bin/cq, bin/cwutil, bin/dynamodb_dump, bin/dynamodb_load, bin/elbadmin,
    bin/fetch_file, bin/glacier, bin/instance_events, bin/kill_instance,
    bin/launch_instance, bin/list_instances, bin/lss3, bin/mturk,
    bin/pyami_sendmail, bin/route53, bin/s3put, bin/sdbadmin, bin/taskadmin)
-celery==3.0.23 (celery/*, excluded bin/celery* and bin/camqadm)
+celery==3.1.3 (celery/*, excluded bin/celery*)
 d2to1==0.2.11 (d2to1/*)
 distribute==0.7.3 (no files)
-django-auth-ldap==1.1.4 (django_auth_ldap/*)
-django-celery==3.0.23 (djcelery/*, excluded bin/djcelerymon)
-django-extensions==1.2.2 (django_extensions/*)
-django-jsonfield==0.9.10 (jsonfield/*)
+django-auth-ldap==1.1.6 (django_auth_ldap/*)
+django-celery==3.1.1 (djcelery/*)
+django-extensions==1.2.5 (django_extensions/*)
+django-jsonfield==0.9.11 (jsonfield/*)
 django-taggit==0.10 (taggit/*)
 djangorestframework==2.3.8 (rest_framework/*)
 httplib2==0.8 (httplib2/*)
 importlib==1.0.2 (importlib/*, needed for Python 2.6 support)
-iso8601==0.1.4 (iso8601/*)
-keyring==3.0.5 (keyring/*, excluded bin/keyring)
-kombu==2.5.14 (kombu/*)
+iso8601==0.1.8 (iso8601/*)
+keyring==3.2 (keyring/*, excluded bin/keyring)
+kombu==3.0.4 (kombu/*)
 Markdown==2.3.1 (markdown/*, excluded bin/markdown_py)
 mock==1.0.1 (mock.py)
 ordereddict==1.1 (ordereddict.py, needed for Python 2.6 support)
 os-diskconfig-python-novaclient-ext==0.1.1 (os_diskconfig_python_novaclient_ext/*)
 os-networksv2-python-novaclient-ext==0.21 (os_networksv2_python_novaclient_ext.py)
-pbr==0.5.21 (pbr/*)
-pexpect==2.4 (pexpect.py, pxssh.py, fdpexpect.py, FSM.py, screen.py, ANSI.py)
+pbr==0.5.23 (pbr/*)
+pexpect==3.0 (pexpect/*, excluded pxssh.py, fdpexpect.py, FSM.py, screen.py,
+    ANSI.py)
 pip==1.4.1 (pip/*, excluded bin/pip*)
 prettytable==0.7.2 (prettytable.py)
-pyrax==1.5.0 (pyrax/*)
-python-dateutil==2.1 (dateutil/*)
+pyrax==1.6.2 (pyrax/*)
+python-dateutil==2.2 (dateutil/*)
 python-novaclient==2.15.0 (novaclient/*, excluded bin/nova)
-python-swiftclient==1.6.0 (swiftclient/*, excluded bin/swift)
-pytz==2013d (pytz/*)
-rackspace-auth-openstack==1.0 (rackspace_auth_openstack/*)
+python-swiftclient==1.8.0 (swiftclient/*, excluded bin/swift)
+pytz==2013.8 (pytz/*)
+rackspace-auth-openstack==1.1 (rackspace_auth_openstack/*)
 rackspace-novaclient==1.3 (no files)
 rax-default-network-flags-python-novaclient-ext==0.1.3 (rax_default_network_flags_python_novaclient_ext/*)
 rax-scheduled-images-python-novaclient-ext==0.2.1 (rax_scheduled_images_python_novaclient_ext/*)
-requests==2.0.0 (requests/*)
-setuptools==1.1.6 (setuptools/*, _markerlib/*, pkg_resources.py, easy_install.py, excluded bin/easy_install*)
-simplejson==3.3.0 (simplejson/*, excluded simplejson/_speedups.so)
+requests==2.0.1 (requests/*)
+setuptools==1.3.2 (setuptools/*, _markerlib/*, pkg_resources.py, easy_install.py, excluded bin/easy_install*)
+simplejson==3.3.1 (simplejson/*, excluded simplejson/_speedups.so)
 six==1.4.1 (six.py)
-South==0.8.2 (south/*)
+South==0.8.3 (south/*)
diff --git a/awx/lib/site-packages/amqp/__init__.py b/awx/lib/site-packages/amqp/__init__.py
index 00bd9e2ce4..210942bb0e 100644
--- a/awx/lib/site-packages/amqp/__init__.py
+++ b/awx/lib/site-packages/amqp/__init__.py
@@ -16,7 +16,7 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
 from __future__ import absolute_import
 
-VERSION = (1, 2, 1)
+VERSION = (1, 3, 3)
 __version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
 __author__ = 'Barry Pederson'
 __maintainer__ = 'Ask Solem'
@@ -61,6 +61,7 @@ from .exceptions import (  # noqa
     error_for_code,
     __all__ as _all_exceptions,
 )
+from .utils import promise  # noqa
 
 __all__ = [
     'Connection',
diff --git a/awx/lib/site-packages/amqp/channel.py b/awx/lib/site-packages/amqp/channel.py
index 48188a5106..ea59f0c1b0 100644
--- a/awx/lib/site-packages/amqp/channel.py
+++ b/awx/lib/site-packages/amqp/channel.py
@@ -24,6 +24,7 @@ from warnings import warn
 from .abstract_channel import AbstractChannel
 from .exceptions import ChannelError, ConsumerCancelled, error_for_code
 from .five import Queue
+from .protocol import basic_return_t, queue_declare_ok_t
 from .serialization import AMQPWriter
 
 __all__ = ['Channel']
@@ -80,6 +81,12 @@ class Channel(AbstractChannel):
         self.events = defaultdict(set)
         self.no_ack_consumers = set()
 
+        # set first time basic_publish_confirm is called
+        # and publisher confirms are enabled for this channel.
+        self._confirm_selected = False
+        if self.connection.confirm_publish:
+            self.basic_publish = self.basic_publish_confirm
+
         self._x_open()
 
     def _do_close(self):
@@ -1272,10 +1279,11 @@ class Channel(AbstractChannel):
             this count.
 
         """
-        queue = args.read_shortstr()
-        message_count = args.read_long()
-        consumer_count = args.read_long()
-        return queue, message_count, consumer_count
+        return queue_declare_ok_t(
+            args.read_shortstr(),
+            args.read_long(),
+            args.read_long(),
+        )
 
     def queue_delete(self, queue='',
                      if_unused=False, if_empty=False, nowait=False):
@@ -1875,6 +1883,7 @@
         exchange = args.read_shortstr()
         routing_key = args.read_shortstr()
 
+        msg.channel = self
         msg.delivery_info = {
             'consumer_tag': consumer_tag,
             'delivery_tag': delivery_tag,
@@ -1883,8 +1892,11 @@
             'routing_key': routing_key,
         }
 
-        fun = self.callbacks.get(consumer_tag, None)
-        if fun is not None:
+        try:
+            fun = self.callbacks[consumer_tag]
+        except KeyError:
+            pass
+        else:
             fun(msg)
 
     def basic_get(self, queue='', no_ack=False):
@@ -2015,6 +2027,7 @@
         routing_key = args.read_shortstr()
         message_count = args.read_long()
 
+        msg.channel = self
         msg.delivery_info = {
             'delivery_tag': delivery_tag,
             'redelivered': redelivered,
@@ -2024,8 +2037,8 @@
         }
         return msg
 
-    def basic_publish(self, msg, exchange='', routing_key='',
-                      mandatory=False, immediate=False):
+    def _basic_publish(self, msg, exchange='', routing_key='',
+                       mandatory=False, immediate=False):
         """Publish a message
 
         This method publishes a message to a specific exchange. The
@@ -2099,6 +2112,15 @@
         args.write_bit(immediate)
 
         self._send_method((60, 40), args, msg)
+    basic_publish = _basic_publish
+
+    def basic_publish_confirm(self, *args, **kwargs):
+        if not self._confirm_selected:
+            self._confirm_selected = True
+            self.confirm_select()
+        ret = self._basic_publish(*args, **kwargs)
+        self.wait([(60, 80)])
+        return ret
 
     def basic_qos(self, prefetch_size, prefetch_count, a_global):
         """Specify quality of service
@@ -2334,14 +2356,13 @@
             message was published.
 
         """
-        reply_code = args.read_short()
-        reply_text = args.read_shortstr()
-        exchange = args.read_shortstr()
-        routing_key = args.read_shortstr()
-
-        self.returned_messages.put(
-            (reply_code, reply_text, exchange, routing_key, msg)
-        )
+        self.returned_messages.put(basic_return_t(
+            args.read_short(),
+            args.read_shortstr(),
+            args.read_shortstr(),
+            args.read_shortstr(),
+            msg,
+        ))
 
     #############
     #
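Note on the queue_declare hunk above: the reply is now a queue_declare_ok_t
namedtuple rather than a bare 3-tuple, so fields can be read by name while
positional unpacking keeps working. A minimal sketch of the difference (the
broker host and the 'demo' queue name are placeholders, assuming a local
broker with default credentials):

    import amqp

    conn = amqp.Connection('localhost')    # placeholder host
    chan = conn.channel()
    ok = chan.queue_declare(queue='demo')  # hypothetical queue name
    # New: fields are addressable by name.
    print(ok.queue, ok.message_count, ok.consumer_count)
    # Old-style positional unpacking still works, so existing callers
    # need no changes:
    queue, message_count, consumer_count = ok
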
""" - queue = args.read_shortstr() - message_count = args.read_long() - consumer_count = args.read_long() - return queue, message_count, consumer_count + return queue_declare_ok_t( + args.read_shortstr(), + args.read_long(), + args.read_long(), + ) def queue_delete(self, queue='', if_unused=False, if_empty=False, nowait=False): @@ -1875,6 +1883,7 @@ class Channel(AbstractChannel): exchange = args.read_shortstr() routing_key = args.read_shortstr() + msg.channel = self msg.delivery_info = { 'consumer_tag': consumer_tag, 'delivery_tag': delivery_tag, @@ -1883,8 +1892,11 @@ class Channel(AbstractChannel): 'routing_key': routing_key, } - fun = self.callbacks.get(consumer_tag, None) - if fun is not None: + try: + fun = self.callbacks[consumer_tag] + except KeyError: + pass + else: fun(msg) def basic_get(self, queue='', no_ack=False): @@ -2015,6 +2027,7 @@ class Channel(AbstractChannel): routing_key = args.read_shortstr() message_count = args.read_long() + msg.channel = self msg.delivery_info = { 'delivery_tag': delivery_tag, 'redelivered': redelivered, @@ -2024,8 +2037,8 @@ class Channel(AbstractChannel): } return msg - def basic_publish(self, msg, exchange='', routing_key='', - mandatory=False, immediate=False): + def _basic_publish(self, msg, exchange='', routing_key='', + mandatory=False, immediate=False): """Publish a message This method publishes a message to a specific exchange. The @@ -2099,6 +2112,15 @@ class Channel(AbstractChannel): args.write_bit(immediate) self._send_method((60, 40), args, msg) + basic_publish = _basic_publish + + def basic_publish_confirm(self, *args, **kwargs): + if not self._confirm_selected: + self._confirm_selected = True + self.confirm_select() + ret = self._basic_publish(*args, **kwargs) + self.wait([(60, 80)]) + return ret def basic_qos(self, prefetch_size, prefetch_count, a_global): """Specify quality of service @@ -2334,14 +2356,13 @@ class Channel(AbstractChannel): message was published. 
""" - reply_code = args.read_short() - reply_text = args.read_shortstr() - exchange = args.read_shortstr() - routing_key = args.read_shortstr() - - self.returned_messages.put( - (reply_code, reply_text, exchange, routing_key, msg) - ) + self.returned_messages.put(basic_return_t( + args.read_short(), + args.read_shortstr(), + args.read_shortstr(), + args.read_shortstr(), + msg, + )) ############# # diff --git a/awx/lib/site-packages/amqp/connection.py b/awx/lib/site-packages/amqp/connection.py index 12b72128a7..9474ab2a18 100644 --- a/awx/lib/site-packages/amqp/connection.py +++ b/awx/lib/site-packages/amqp/connection.py @@ -89,7 +89,7 @@ class Connection(AbstractChannel): virtual_host='/', locale='en_US', client_properties=None, ssl=False, connect_timeout=None, channel_max=None, frame_max=None, heartbeat=0, on_blocked=None, - on_unblocked=None, **kwargs): + on_unblocked=None, confirm_publish=False, **kwargs): """Create a connection to the specified host, which should be a 'host[:port]', such as 'localhost', or '1.2.3.4:5672' (defaults to 'localhost', if a port is not specified then @@ -127,6 +127,8 @@ class Connection(AbstractChannel): self.frame_max = frame_max self.heartbeat = heartbeat + self.confirm_publish = confirm_publish + # Callbacks self.on_blocked = on_blocked self.on_unblocked = on_unblocked @@ -163,6 +165,10 @@ class Connection(AbstractChannel): return self._x_open(virtual_host) + @property + def connected(self): + return self.transport and self.transport.connected + def _do_close(self): try: self.transport.close() diff --git a/awx/lib/site-packages/amqp/exceptions.py b/awx/lib/site-packages/amqp/exceptions.py index d8bcf33548..e3e144a509 100644 --- a/awx/lib/site-packages/amqp/exceptions.py +++ b/awx/lib/site-packages/amqp/exceptions.py @@ -47,7 +47,9 @@ class AMQPError(Exception): reply_text, method_sig, self.method_name) def __str__(self): - return '{0.method}: ({0.reply_code}) {0.reply_text}'.format(self) + if self.method: + return '{0.method}: ({0.reply_code}) {0.reply_text}'.format(self) + return self.reply_text or '' @property def method(self): diff --git a/awx/lib/site-packages/amqp/method_framing.py b/awx/lib/site-packages/amqp/method_framing.py index a2722139bc..85fbfba5dd 100644 --- a/awx/lib/site-packages/amqp/method_framing.py +++ b/awx/lib/site-packages/amqp/method_framing.py @@ -46,7 +46,7 @@ _CONTENT_METHODS = [ class _PartialMessage(object): """Helper class to build up a multi-frame method.""" - def __init__(self, method_sig, args): + def __init__(self, method_sig, args, channel): self.method_sig = method_sig self.args = args self.msg = Message() @@ -147,7 +147,9 @@ class MethodReader(object): # # Save what we've got so far and wait for the content-header # - self.partial_messages[channel] = _PartialMessage(method_sig, args) + self.partial_messages[channel] = _PartialMessage( + method_sig, args, channel, + ) self.expected_types[channel] = 2 else: self._quick_put((channel, method_sig, args, None)) diff --git a/awx/lib/site-packages/amqp/protocol.py b/awx/lib/site-packages/amqp/protocol.py new file mode 100644 index 0000000000..0856eb4e13 --- /dev/null +++ b/awx/lib/site-packages/amqp/protocol.py @@ -0,0 +1,13 @@ +from __future__ import absolute_import + +from collections import namedtuple + + +queue_declare_ok_t = namedtuple( + 'queue_declare_ok_t', ('queue', 'message_count', 'consumer_count'), +) + +basic_return_t = namedtuple( + 'basic_return_t', + ('reply_code', 'reply_text', 'exchange', 'routing_key', 'message'), +) diff --git 
a/awx/lib/site-packages/amqp/transport.py b/awx/lib/site-packages/amqp/transport.py index 6b17757cae..975ced16b7 100644 --- a/awx/lib/site-packages/amqp/transport.py +++ b/awx/lib/site-packages/amqp/transport.py @@ -49,6 +49,9 @@ except: from struct import pack, unpack from .exceptions import UnexpectedFrame +from .utils import get_errno, set_cloexec + +_UNAVAIL = errno.EAGAIN, errno.EINTR AMQP_PORT = 5672 @@ -63,8 +66,10 @@ IPV6_LITERAL = re.compile(r'\[([\.0-9a-f:]+)\](?::(\d+))?') class _AbstractTransport(object): """Common superclass for TCP and SSL transports""" + connected = False def __init__(self, host, connect_timeout): + self.connected = True msg = None port = AMQP_PORT @@ -85,6 +90,10 @@ class _AbstractTransport(object): af, socktype, proto, canonname, sa = res try: self.sock = socket.socket(af, socktype, proto) + try: + set_cloexec(self.sock, True) + except NotImplementedError: + pass self.sock.settimeout(connect_timeout) self.sock.connect(sa) except socket.error as exc: @@ -99,13 +108,18 @@ class _AbstractTransport(object): # Didn't connect, return the most recent error message raise socket.error(last_err) - self.sock.settimeout(None) - self.sock.setsockopt(SOL_TCP, socket.TCP_NODELAY, 1) - self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + try: + self.sock.settimeout(None) + self.sock.setsockopt(SOL_TCP, socket.TCP_NODELAY, 1) + self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) - self._setup_transport() + self._setup_transport() - self._write(AMQP_PROTOCOL_HEADER) + self._write(AMQP_PROTOCOL_HEADER) + except (OSError, IOError, socket.error) as exc: + if get_errno(exc) not in _UNAVAIL: + self.connected = False + raise def __del__(self): try: @@ -141,12 +155,20 @@ class _AbstractTransport(object): self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() self.sock = None + self.connected = False def read_frame(self, unpack=unpack): read = self._read - frame_type, channel, size = unpack('>BHI', read(7, True)) - payload = read(size) - ch = ord(read(1)) + try: + frame_type, channel, size = unpack('>BHI', read(7, True)) + payload = read(size) + ch = ord(read(1)) + except socket.timeout: + raise + except (OSError, IOError, socket.error) as exc: + if get_errno(exc) not in _UNAVAIL: + self.connected = False + raise if ch == 206: # '\xce' return frame_type, channel, payload else: @@ -155,10 +177,17 @@ class _AbstractTransport(object): def write_frame(self, frame_type, channel, payload): size = len(payload) - self._write(pack( - '>BHI%dsB' % size, - frame_type, channel, size, payload, 0xce, - )) + try: + self._write(pack( + '>BHI%dsB' % size, + frame_type, channel, size, payload, 0xce, + )) + except socket.timeout: + raise + except (OSError, IOError, socket.error) as exc: + if get_errno(exc) not in _UNAVAIL: + self.connected = False + raise class SSLTransport(_AbstractTransport): @@ -200,19 +229,22 @@ class SSLTransport(_AbstractTransport): # to get the exact number of bytes wanted. recv = self._quick_recv rbuf = self._read_buffer - while len(rbuf) < n: - try: - s = recv(131072) # see note above - except socket.error as exc: - # ssl.sock.read may cause ENOENT if the - # operation couldn't be performed (Issue celery#1414). - if not initial and exc.errno in _errnos: - continue - raise exc - if not s: - raise IOError('Socket closed') - rbuf += s - + try: + while len(rbuf) < n: + try: + s = recv(131072) # see note above + except socket.error as exc: + # ssl.sock.read may cause ENOENT if the + # operation couldn't be performed (Issue celery#1414). 
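+                    # transient errors listed in _errnos are retried;
+                    # anything else is re-raised after the bytes read so
+                    # far are saved back to self._read_buffer by the
+                    # enclosing try/except.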
+ if not initial and exc.errno in _errnos: + continue + raise + if not s: + raise IOError('Socket closed') + rbuf += s + except: + self._read_buffer = rbuf + raise result, self._read_buffer = rbuf[:n], rbuf[n:] return result @@ -240,16 +272,20 @@ class TCPTransport(_AbstractTransport): """Read exactly n bytes from the socket""" recv = self._quick_recv rbuf = self._read_buffer - while len(rbuf) < n: - try: - s = recv(131072) - except socket.error as exc: - if not initial and exc.errno in _errnos: - continue - raise - if not s: - raise IOError('Socket closed') - rbuf += s + try: + while len(rbuf) < n: + try: + s = recv(131072) + except socket.error as exc: + if not initial and exc.errno in _errnos: + continue + raise + if not s: + raise IOError('Socket closed') + rbuf += s + except: + self._read_buffer = rbuf + raise result, self._read_buffer = rbuf[:n], rbuf[n:] return result diff --git a/awx/lib/site-packages/amqp/utils.py b/awx/lib/site-packages/amqp/utils.py index 05dbc93bf8..994030b351 100644 --- a/awx/lib/site-packages/amqp/utils.py +++ b/awx/lib/site-packages/amqp/utils.py @@ -2,6 +2,11 @@ from __future__ import absolute_import import sys +try: + import fcntl +except ImportError: + fcntl = None # noqa + class promise(object): if not hasattr(sys, 'pypy_version_info'): @@ -59,3 +64,36 @@ class promise(object): def noop(): return promise(lambda *a, **k: None) + + +try: + from os import set_cloexec # Python 3.4? +except ImportError: + def set_cloexec(fd, cloexec): # noqa + try: + FD_CLOEXEC = fcntl.FD_CLOEXEC + except AttributeError: + raise NotImplementedError( + 'close-on-exec flag not supported on this platform', + ) + flags = fcntl.fcntl(fd, fcntl.F_GETFD) + if cloexec: + flags |= FD_CLOEXEC + else: + flags &= ~FD_CLOEXEC + return fcntl.fcntl(fd, fcntl.F_SETFD, flags) + + +def get_errno(exc): + """:exc:`socket.error` and :exc:`IOError` first got + the ``.errno`` attribute in Py2.7""" + try: + return exc.errno + except AttributeError: + try: + # e.args = (errno, reason) + if isinstance(exc.args, tuple) and len(exc.args) == 2: + return exc.args[0] + except AttributeError: + pass + return 0 diff --git a/awx/lib/site-packages/billiard/__init__.py b/awx/lib/site-packages/billiard/__init__.py index 846d01c168..ab5642cde7 100644 --- a/awx/lib/site-packages/billiard/__init__.py +++ b/awx/lib/site-packages/billiard/__init__.py @@ -18,9 +18,8 @@ # from __future__ import absolute_import -from __future__ import with_statement -VERSION = (2, 7, 3, 32) +VERSION = (3, 3, 0, 6) __version__ = ".".join(map(str, VERSION[0:4])) + "".join(VERSION[4:]) __author__ = 'R Oudkerk / Python Software Foundation' __author_email__ = 'python-dev@python.org' @@ -90,15 +89,12 @@ def Manager(): return m -def Pipe(duplex=True): +def Pipe(duplex=True, rnonblock=False, wnonblock=False): ''' Returns two connection object connected by a pipe ''' - if sys.version_info[0] == 3: - from multiprocessing.connection import Pipe - else: - from billiard._connection import Pipe - return Pipe(duplex) + from billiard.connection import Pipe + return Pipe(duplex, rnonblock, wnonblock) def cpu_count(): @@ -241,7 +237,11 @@ def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None, Returns a process pool object ''' from .pool import Pool - return Pool(processes, initializer, initargs, maxtasksperchild) + return Pool(processes, initializer, initargs, maxtasksperchild, + timeout, soft_timeout, lost_worker_timeout, + max_restarts, max_restart_freq, on_process_up, + on_process_down, on_timeout_set, on_timeout_cancel, + 
threads, semaphore, putlocks, allow_restart) def RawValue(typecode_or_type, *args): diff --git a/awx/lib/site-packages/billiard/_connection.py b/awx/lib/site-packages/billiard/_connection.py index 6d8ffde839..1bd185d640 100644 --- a/awx/lib/site-packages/billiard/_connection.py +++ b/awx/lib/site-packages/billiard/_connection.py @@ -8,7 +8,6 @@ # from __future__ import absolute_import -from __future__ import with_statement __all__ = ['Client', 'Listener', 'Pipe'] @@ -21,11 +20,13 @@ import tempfile import itertools from . import AuthenticationError +from . import reduction from ._ext import _billiard, win32 -from .compat import get_errno -from .util import get_temp_dir, Finalize, sub_debug, debug +from .compat import get_errno, bytes, setblocking +from .five import monotonic from .forking import duplicate, close -from .compat import bytes +from .reduction import ForkingPickler +from .util import get_temp_dir, Finalize, sub_debug, debug try: WindowsError = WindowsError # noqa @@ -36,6 +37,9 @@ except NameError: # global set later xmlrpclib = None +Connection = getattr(_billiard, 'Connection', None) +PipeConnection = getattr(_billiard, 'PipeConnection', None) + # # @@ -60,11 +64,11 @@ if sys.platform == 'win32': def _init_timeout(timeout=CONNECTION_TIMEOUT): - return time.time() + timeout + return monotonic() + timeout def _check_timeout(t): - return time.time() > t + return monotonic() > t # # @@ -81,7 +85,7 @@ def arbitrary_address(family): return tempfile.mktemp(prefix='listener-', dir=get_temp_dir()) elif family == 'AF_PIPE': return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % - (os.getpid(), _mmap_counter.next())) + (os.getpid(), next(_mmap_counter))) else: raise ValueError('unrecognized family') @@ -183,26 +187,32 @@ def Client(address, family=None, authkey=None): if sys.platform != 'win32': - def Pipe(duplex=True): + def Pipe(duplex=True, rnonblock=False, wnonblock=False): ''' Returns pair of connection objects at either end of a pipe ''' if duplex: s1, s2 = socket.socketpair() - c1 = _billiard.Connection(os.dup(s1.fileno())) - c2 = _billiard.Connection(os.dup(s2.fileno())) + s1.setblocking(not rnonblock) + s2.setblocking(not wnonblock) + c1 = Connection(os.dup(s1.fileno())) + c2 = Connection(os.dup(s2.fileno())) s1.close() s2.close() else: fd1, fd2 = os.pipe() - c1 = _billiard.Connection(fd1, writable=False) - c2 = _billiard.Connection(fd2, readable=False) + if rnonblock: + setblocking(fd1, 0) + if wnonblock: + setblocking(fd2, 0) + c1 = Connection(fd1, writable=False) + c2 = Connection(fd2, readable=False) return c1, c2 else: - def Pipe(duplex=True): # noqa + def Pipe(duplex=True, rnonblock=False, wnonblock=False): # noqa ''' Returns pair of connection objects at either end of a pipe ''' @@ -231,12 +241,12 @@ else: try: win32.ConnectNamedPipe(h1, win32.NULL) - except WindowsError, e: - if e.args[0] != win32.ERROR_PIPE_CONNECTED: + except WindowsError as exc: + if exc.args[0] != win32.ERROR_PIPE_CONNECTED: raise - c1 = _billiard.PipeConnection(h1, writable=duplex) - c2 = _billiard.PipeConnection(h2, readable=duplex) + c1 = PipeConnection(h1, writable=duplex) + c2 = PipeConnection(h2, readable=duplex) return c1, c2 @@ -275,7 +285,7 @@ class SocketListener(object): def accept(self): s, self._last_accepted = self._socket.accept() fd = duplicate(s.fileno()) - conn = _billiard.Connection(fd) + conn = Connection(fd) s.close() return conn @@ -296,7 +306,7 @@ def SocketClient(address): while 1: try: s.connect(address) - except socket.error, exc: + except socket.error as exc: if 
get_errno(exc) != errno.ECONNREFUSED or _check_timeout(t): debug('failed to connect to address %s', address) raise @@ -307,7 +317,7 @@ def SocketClient(address): raise fd = duplicate(s.fileno()) - conn = _billiard.Connection(fd) + conn = Connection(fd) s.close() return conn @@ -352,10 +362,10 @@ if sys.platform == 'win32': handle = self._handle_queue.pop(0) try: win32.ConnectNamedPipe(handle, win32.NULL) - except WindowsError, e: - if e.args[0] != win32.ERROR_PIPE_CONNECTED: + except WindowsError as exc: + if exc.args[0] != win32.ERROR_PIPE_CONNECTED: raise - return _billiard.PipeConnection(handle) + return PipeConnection(handle) @staticmethod def _finalize_pipe_listener(queue, address): @@ -375,8 +385,8 @@ if sys.platform == 'win32': address, win32.GENERIC_READ | win32.GENERIC_WRITE, 0, win32.NULL, win32.OPEN_EXISTING, 0, win32.NULL, ) - except WindowsError, e: - if e.args[0] not in ( + except WindowsError as exc: + if exc.args[0] not in ( win32.ERROR_SEM_TIMEOUT, win32.ERROR_PIPE_BUSY) or _check_timeout(t): raise @@ -388,7 +398,7 @@ if sys.platform == 'win32': win32.SetNamedPipeHandleState( h, win32.PIPE_READMODE_MESSAGE, None, None ) - return _billiard.PipeConnection(h) + return PipeConnection(h) # # Authentication stuff @@ -471,3 +481,12 @@ def XmlClient(*args, **kwds): global xmlrpclib import xmlrpclib # noqa return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) + + +if sys.platform == 'win32': + ForkingPickler.register(socket.socket, reduction.reduce_socket) + ForkingPickler.register(Connection, reduction.reduce_connection) + ForkingPickler.register(PipeConnection, reduction.reduce_pipe_connection) +else: + ForkingPickler.register(socket.socket, reduction.reduce_socket) + ForkingPickler.register(Connection, reduction.reduce_connection) diff --git a/awx/lib/site-packages/billiard/_connection3.py b/awx/lib/site-packages/billiard/_connection3.py new file mode 100644 index 0000000000..4cefcda5dd --- /dev/null +++ b/awx/lib/site-packages/billiard/_connection3.py @@ -0,0 +1,955 @@ +# +# A higher level module for using sockets (or Windows named pipes) +# +# multiprocessing/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# +from __future__ import absolute_import + +__all__ = ['Client', 'Listener', 'Pipe', 'wait'] + +import io +import os +import sys +import select +import socket +import struct +import errno +import tempfile +import itertools + +import _multiprocessing +from .compat import setblocking +from .exceptions import AuthenticationError, BufferTooShort +from .five import monotonic +from .util import get_temp_dir, Finalize, sub_debug +from .reduction import ForkingPickler +try: + import _winapi + from _winapi import ( + WAIT_OBJECT_0, + WAIT_ABANDONED_0, + WAIT_TIMEOUT, + INFINITE, + ) +except ImportError: + if sys.platform == 'win32': + raise + _winapi = None + +# +# +# + +BUFSIZE = 8192 +# A very generous timeout when it comes to local connections... +CONNECTION_TIMEOUT = 20. 
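+# Note: the connection timeouts below are measured with five.monotonic()
+# (see _init_timeout()/_check_timeout()), making them immune to
+# wall-clock adjustments.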
+ +_mmap_counter = itertools.count() + +default_family = 'AF_INET' +families = ['AF_INET'] + +if hasattr(socket, 'AF_UNIX'): + default_family = 'AF_UNIX' + families += ['AF_UNIX'] + +if sys.platform == 'win32': + default_family = 'AF_PIPE' + families += ['AF_PIPE'] + + +def _init_timeout(timeout=CONNECTION_TIMEOUT): + return monotonic() + timeout + + +def _check_timeout(t): + return monotonic() > t + + +def arbitrary_address(family): + ''' + Return an arbitrary free address for the given family + ''' + if family == 'AF_INET': + return ('localhost', 0) + elif family == 'AF_UNIX': + return tempfile.mktemp(prefix='listener-', dir=get_temp_dir()) + elif family == 'AF_PIPE': + return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % + (os.getpid(), next(_mmap_counter))) + else: + raise ValueError('unrecognized family') + + +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + + if sys.platform == 'win32' and family == 'AF_UNIX': + # double check + if not hasattr(socket, family): + raise ValueError('Family %s is not recognized.' % family) + + +def address_type(address): + ''' + Return the types of the address + + This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' + ''' + if type(address) == tuple: + return 'AF_INET' + elif type(address) is str and address.startswith('\\\\'): + return 'AF_PIPE' + elif type(address) is str: + return 'AF_UNIX' + else: + raise ValueError('address type of %r unrecognized' % address) + +# +# Connection classes +# + + +class _ConnectionBase: + _handle = None + + def __init__(self, handle, readable=True, writable=True): + handle = handle.__index__() + if handle < 0: + raise ValueError("invalid handle") + if not readable and not writable: + raise ValueError( + "at least one of `readable` and `writable` must be True") + self._handle = handle + self._readable = readable + self._writable = writable + + # XXX should we use util.Finalize instead of a __del__? + + def __del__(self): + if self._handle is not None: + self._close() + + def _check_closed(self): + if self._handle is None: + raise OSError("handle is closed") + + def _check_readable(self): + if not self._readable: + raise OSError("connection is write-only") + + def _check_writable(self): + if not self._writable: + raise OSError("connection is read-only") + + def _bad_message_length(self): + if self._writable: + self._readable = False + else: + self.close() + raise OSError("bad message length") + + @property + def closed(self): + """True if the connection is closed""" + return self._handle is None + + @property + def readable(self): + """True if the connection is readable""" + return self._readable + + @property + def writable(self): + """True if the connection is writable""" + return self._writable + + def fileno(self): + """File descriptor or handle of the connection""" + self._check_closed() + return self._handle + + def close(self): + """Close the connection""" + if self._handle is not None: + try: + self._close() + finally: + self._handle = None + + def send_bytes(self, buf, offset=0, size=None): + """Send the bytes data from a bytes-like object""" + self._check_closed() + self._check_writable() + m = memoryview(buf) + # HACK for byte-indexing of non-bytewise buffers (e.g. 
array.array) + if m.itemsize > 1: + m = memoryview(bytes(m)) + n = len(m) + if offset < 0: + raise ValueError("offset is negative") + if n < offset: + raise ValueError("buffer length < offset") + if size is None: + size = n - offset + elif size < 0: + raise ValueError("size is negative") + elif offset + size > n: + raise ValueError("buffer length < offset + size") + self._send_bytes(m[offset:offset + size]) + + def send(self, obj): + """Send a (picklable) object""" + self._check_closed() + self._check_writable() + self._send_bytes(ForkingPickler.dumps(obj)) + + def recv_bytes(self, maxlength=None): + """ + Receive bytes data as a bytes object. + """ + self._check_closed() + self._check_readable() + if maxlength is not None and maxlength < 0: + raise ValueError("negative maxlength") + buf = self._recv_bytes(maxlength) + if buf is None: + self._bad_message_length() + return buf.getvalue() + + def recv_bytes_into(self, buf, offset=0): + """ + Receive bytes data into a writeable buffer-like object. + Return the number of bytes read. + """ + self._check_closed() + self._check_readable() + with memoryview(buf) as m: + # Get bytesize of arbitrary buffer + itemsize = m.itemsize + bytesize = itemsize * len(m) + if offset < 0: + raise ValueError("negative offset") + elif offset > bytesize: + raise ValueError("offset too large") + result = self._recv_bytes() + size = result.tell() + if bytesize < offset + size: + raise BufferTooShort(result.getvalue()) + # Message can fit in dest + result.seek(0) + result.readinto( + m[offset // itemsize:(offset + size) // itemsize] + ) + return size + + def recv_payload(self): + return self._recv_bytes().getbuffer() + + def recv(self): + """Receive a (picklable) object""" + self._check_closed() + self._check_readable() + buf = self._recv_bytes() + return ForkingPickler.loads(buf.getbuffer()) + + def poll(self, timeout=0.0): + """Whether there is any input available to be read""" + self._check_closed() + self._check_readable() + return self._poll(timeout) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +if _winapi: + + class PipeConnection(_ConnectionBase): + """ + Connection class based on a Windows named pipe. + Overlapped I/O is used, so the handles must have been created + with FILE_FLAG_OVERLAPPED. 
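+    Only defined when _winapi is importable (i.e. on Windows).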
+ """ + _got_empty_message = False + + def _close(self, _CloseHandle=_winapi.CloseHandle): + _CloseHandle(self._handle) + + def _send_bytes(self, buf): + ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nwritten, err = ov.GetOverlappedResult(True) + assert err == 0 + assert nwritten == len(buf) + + def _recv_bytes(self, maxsize=None): + if self._got_empty_message: + self._got_empty_message = False + return io.BytesIO() + else: + bsize = 128 if maxsize is None else min(maxsize, 128) + try: + ov, err = _winapi.ReadFile(self._handle, bsize, + overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nread, err = ov.GetOverlappedResult(True) + if err == 0: + f = io.BytesIO() + f.write(ov.getbuffer()) + return f + elif err == _winapi.ERROR_MORE_DATA: + return self._get_more_data(ov, maxsize) + except OSError as e: + if e.winerror == _winapi.ERROR_BROKEN_PIPE: + raise EOFError + else: + raise + raise RuntimeError( + "shouldn't get here; expected KeyboardInterrupt" + ) + + def _poll(self, timeout): + if (self._got_empty_message or + _winapi.PeekNamedPipe(self._handle)[0] != 0): + return True + return bool(wait([self], timeout)) + + def _get_more_data(self, ov, maxsize): + buf = ov.getbuffer() + f = io.BytesIO() + f.write(buf) + left = _winapi.PeekNamedPipe(self._handle)[1] + assert left > 0 + if maxsize is not None and len(buf) + left > maxsize: + self._bad_message_length() + ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) + rbytes, err = ov.GetOverlappedResult(True) + assert err == 0 + assert rbytes == left + f.write(ov.getbuffer()) + return f + + +class Connection(_ConnectionBase): + """ + Connection class based on an arbitrary file descriptor (Unix only), or + a socket handle (Windows). + """ + + if _winapi: + def _close(self, _close=_multiprocessing.closesocket): + _close(self._handle) + _write = _multiprocessing.send + _read = _multiprocessing.recv + else: + def _close(self, _close=os.close): # noqa + _close(self._handle) + _write = os.write + _read = os.read + + def send_offset(self, buf, offset, write=_write): + return write(self._handle, buf[offset:]) + + def _send(self, buf, write=_write): + remaining = len(buf) + while True: + try: + n = write(self._handle, buf) + except OSError as exc: + if exc.errno == errno.EINTR: + continue + raise + remaining -= n + if remaining == 0: + break + buf = buf[n:] + + def setblocking(self, blocking): + setblocking(self._handle, blocking) + + def _recv(self, size, read=_read): + buf = io.BytesIO() + handle = self._handle + remaining = size + while remaining > 0: + try: + chunk = read(handle, remaining) + except OSError as exc: + if exc.errno == errno.EINTR: + continue + raise + n = len(chunk) + if n == 0: + if remaining == size: + raise EOFError + else: + raise OSError("got end of file during message") + buf.write(chunk) + remaining -= n + return buf + + def _send_bytes(self, buf): + # For wire compatibility with 3.2 and lower + n = len(buf) + self._send(struct.pack("!i", n)) + # The condition is necessary to avoid "broken pipe" errors + # when sending a 0-length buffer if the other end closed the pipe. 
+ if n > 0: + self._send(buf) + + def _recv_bytes(self, maxsize=None): + buf = self._recv(4) + size, = struct.unpack("!i", buf.getvalue()) + if maxsize is not None and size > maxsize: + return None + return self._recv(size) + + def _poll(self, timeout): + r = wait([self], timeout) + return bool(r) + + +# +# Public functions +# + +class Listener(object): + ''' + Returns a listener object. + + This is a wrapper for a bound socket which is 'listening' for + connections, or for a Windows named pipe. + ''' + def __init__(self, address=None, family=None, backlog=1, authkey=None): + family = (family or (address and address_type(address)) + or default_family) + address = address or arbitrary_address(family) + + _validate_family(family) + if family == 'AF_PIPE': + self._listener = PipeListener(address, backlog) + else: + self._listener = SocketListener(address, family, backlog) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + self._authkey = authkey + + def accept(self): + ''' + Accept a connection on the bound socket or named pipe of `self`. + + Returns a `Connection` object. + ''' + if self._listener is None: + raise OSError('listener is closed') + c = self._listener.accept() + if self._authkey: + deliver_challenge(c, self._authkey) + answer_challenge(c, self._authkey) + return c + + def close(self): + ''' + Close the bound socket or named pipe of `self`. + ''' + if self._listener is not None: + self._listener.close() + self._listener = None + + address = property(lambda self: self._listener._address) + last_accepted = property(lambda self: self._listener._last_accepted) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address, family=None, authkey=None): + ''' + Returns a connection to the address of a `Listener` + ''' + family = family or address_type(address) + _validate_family(family) + if family == 'AF_PIPE': + c = PipeClient(address) + else: + c = SocketClient(address) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + if authkey is not None: + answer_challenge(c, authkey) + deliver_challenge(c, authkey) + + return c + + +if sys.platform != 'win32': + + def Pipe(duplex=True, rnonblock=False, wnonblock=False): + ''' + Returns pair of connection objects at either end of a pipe + ''' + if duplex: + s1, s2 = socket.socketpair() + s1.setblocking(not rnonblock) + s2.setblocking(not wnonblock) + c1 = Connection(s1.detach()) + c2 = Connection(s2.detach()) + else: + fd1, fd2 = os.pipe() + if rnonblock: + setblocking(fd1, 0) + if wnonblock: + setblocking(fd2, 0) + c1 = Connection(fd1, writable=False) + c2 = Connection(fd2, readable=False) + + return c1, c2 + +else: + + def Pipe(duplex=True, rnonblock=False, wnonblock=False): # noqa + ''' + Returns pair of connection objects at either end of a pipe + ''' + address = arbitrary_address('AF_PIPE') + if duplex: + openmode = _winapi.PIPE_ACCESS_DUPLEX + access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE + obsize, ibsize = BUFSIZE, BUFSIZE + else: + openmode = _winapi.PIPE_ACCESS_INBOUND + access = _winapi.GENERIC_WRITE + obsize, ibsize = 0, BUFSIZE + + h1 = _winapi.CreateNamedPipe( + address, openmode | _winapi.FILE_FLAG_OVERLAPPED | + _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL + ) + h2 = 
_winapi.CreateFile( + address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + _winapi.SetNamedPipeHandleState( + h2, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + + overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) + _, err = overlapped.GetOverlappedResult(True) + assert err == 0 + + c1 = PipeConnection(h1, writable=duplex) + c2 = PipeConnection(h2, readable=duplex) + + return c1, c2 + +# +# Definitions for connections based on sockets +# + + +class SocketListener(object): + ''' + Representation of a socket which is bound to an address and listening + ''' + def __init__(self, address, family, backlog=1): + self._socket = socket.socket(getattr(socket, family)) + try: + # SO_REUSEADDR has different semantics on Windows (issue #2550). + if os.name == 'posix': + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + self._socket.setblocking(True) + self._socket.bind(address) + self._socket.listen(backlog) + self._address = self._socket.getsockname() + except OSError: + self._socket.close() + raise + self._family = family + self._last_accepted = None + + if family == 'AF_UNIX': + self._unlink = Finalize( + self, os.unlink, args=(address, ), exitpriority=0 + ) + else: + self._unlink = None + + def accept(self): + while True: + try: + s, self._last_accepted = self._socket.accept() + except OSError as exc: + if exc.errno == errno.EINTR: + continue + raise + else: + break + s.setblocking(True) + return Connection(s.detach()) + + def close(self): + self._socket.close() + if self._unlink is not None: + self._unlink() + + +def SocketClient(address): + ''' + Return a connection object connected to the socket given by `address` + ''' + family = address_type(address) + with socket.socket(getattr(socket, family)) as s: + s.setblocking(True) + s.connect(address) + return Connection(s.detach()) + +# +# Definitions for connections based on named pipes +# + +if sys.platform == 'win32': + + class PipeListener(object): + ''' + Representation of a named pipe + ''' + def __init__(self, address, backlog=None): + self._address = address + self._handle_queue = [self._new_handle(first=True)] + + self._last_accepted = None + sub_debug('listener created with address=%r', self._address) + self.close = Finalize( + self, PipeListener._finalize_pipe_listener, + args=(self._handle_queue, self._address), exitpriority=0 + ) + + def _new_handle(self, first=False): + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + return _winapi.CreateNamedPipe( + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL + ) + + def accept(self): + self._handle_queue.append(self._new_handle()) + handle = self._handle_queue.pop(0) + try: + ov = _winapi.ConnectNamedPipe(handle, overlapped=True) + except OSError as e: + if e.winerror != _winapi.ERROR_NO_DATA: + raise + # ERROR_NO_DATA can occur if a client has already connected, + # written data and then disconnected -- see Issue 14725. 
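+            # in that case the pipe is nevertheless connected and
+            # usable, so we fall through to return the handle below.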
+ else: + try: + _winapi.WaitForMultipleObjects([ov.event], False, INFINITE) + except: + ov.cancel() + _winapi.CloseHandle(handle) + raise + finally: + _, err = ov.GetOverlappedResult(True) + assert err == 0 + return PipeConnection(handle) + + @staticmethod + def _finalize_pipe_listener(queue, address): + sub_debug('closing listener with address=%r', address) + for handle in queue: + _winapi.CloseHandle(handle) + + def PipeClient(address, + errors=(_winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY)): + ''' + Return a connection object connected to the pipe given by `address` + ''' + t = _init_timeout() + while 1: + try: + _winapi.WaitNamedPipe(address, 1000) + h = _winapi.CreateFile( + address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, + 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + except OSError as e: + if e.winerror not in errors or _check_timeout(t): + raise + else: + break + else: + raise + + _winapi.SetNamedPipeHandleState( + h, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + return PipeConnection(h) + +# +# Authentication stuff +# + +MESSAGE_LENGTH = 20 + +CHALLENGE = b'#CHALLENGE#' +WELCOME = b'#WELCOME#' +FAILURE = b'#FAILURE#' + + +def deliver_challenge(connection, authkey): + import hmac + assert isinstance(authkey, bytes) + message = os.urandom(MESSAGE_LENGTH) + connection.send_bytes(CHALLENGE + message) + digest = hmac.new(authkey, message).digest() + response = connection.recv_bytes(256) # reject large message + if response == digest: + connection.send_bytes(WELCOME) + else: + connection.send_bytes(FAILURE) + raise AuthenticationError('digest received was wrong') + + +def answer_challenge(connection, authkey): + import hmac + assert isinstance(authkey, bytes) + message = connection.recv_bytes(256) # reject large message + assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message + message = message[len(CHALLENGE):] + digest = hmac.new(authkey, message).digest() + connection.send_bytes(digest) + response = connection.recv_bytes(256) # reject large message + if response != WELCOME: + raise AuthenticationError('digest sent was rejected') + +# +# Support for using xmlrpclib for serialization +# + + +class ConnectionWrapper(object): + + def __init__(self, conn, dumps, loads): + self._conn = conn + self._dumps = dumps + self._loads = loads + for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): + obj = getattr(conn, attr) + setattr(self, attr, obj) + + def send(self, obj): + s = self._dumps(obj) + self._conn.send_bytes(s) + + def recv(self): + s = self._conn.recv_bytes() + return self._loads(s) + + +def _xml_dumps(obj): + return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') # noqa + + +def _xml_loads(s): + (obj,), method = xmlrpclib.loads(s.decode('utf-8')) # noqa + return obj + + +class XmlListener(Listener): + def accept(self): + global xmlrpclib + import xmlrpc.client as xmlrpclib # noqa + obj = Listener.accept(self) + return ConnectionWrapper(obj, _xml_dumps, _xml_loads) + + +def XmlClient(*args, **kwds): + global xmlrpclib + import xmlrpc.client as xmlrpclib # noqa + return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) + +# +# Wait +# + +if sys.platform == 'win32': + + def _exhaustive_wait(handles, timeout): + # Return ALL handles which are currently signalled. (Only + # returning the first signalled might create starvation issues.) 
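+        # once the first signalled handle has been collected the timeout
+        # drops to zero, so later passes only pick up handles that are
+        # already signalled.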
+ L = list(handles) + ready = [] + while L: + res = _winapi.WaitForMultipleObjects(L, False, timeout) + if res == WAIT_TIMEOUT: + break + elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): + res -= WAIT_OBJECT_0 + elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): + res -= WAIT_ABANDONED_0 + else: + raise RuntimeError('Should not get here') + ready.append(L[res]) + L = L[res+1:] + timeout = 0 + return ready + + _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. + ''' + if timeout is None: + timeout = INFINITE + elif timeout < 0: + timeout = 0 + else: + timeout = int(timeout * 1000 + 0.5) + + object_list = list(object_list) + waithandle_to_obj = {} + ov_list = [] + ready_objects = set() + ready_handles = set() + + try: + for o in object_list: + try: + fileno = getattr(o, 'fileno') + except AttributeError: + waithandle_to_obj[o.__index__()] = o + else: + # start an overlapped read of length zero + try: + ov, err = _winapi.ReadFile(fileno(), 0, True) + except OSError as e: + err = e.winerror + if err not in _ready_errors: + raise + if err == _winapi.ERROR_IO_PENDING: + ov_list.append(ov) + waithandle_to_obj[ov.event] = o + else: + # If o.fileno() is an overlapped pipe handle and + # err == 0 then there is a zero length message + # in the pipe, but it HAS NOT been consumed. + ready_objects.add(o) + timeout = 0 + + ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) + finally: + # request that overlapped reads stop + for ov in ov_list: + ov.cancel() + + # wait for all overlapped reads to stop + for ov in ov_list: + try: + _, err = ov.GetOverlappedResult(True) + except OSError as e: + err = e.winerror + if err not in _ready_errors: + raise + if err != _winapi.ERROR_OPERATION_ABORTED: + o = waithandle_to_obj[ov.event] + ready_objects.add(o) + if err == 0: + # If o.fileno() is an overlapped pipe handle then + # a zero length message HAS been consumed. + if hasattr(o, '_got_empty_message'): + o._got_empty_message = True + + ready_objects.update(waithandle_to_obj[h] for h in ready_handles) + return [o for o in object_list if o in ready_objects] + +else: + + if hasattr(select, 'poll'): + def _poll(fds, timeout): + if timeout is not None: + timeout = int(timeout * 1000) # timeout is in milliseconds + fd_map = {} + pollster = select.poll() + for fd in fds: + pollster.register(fd, select.POLLIN) + if hasattr(fd, 'fileno'): + fd_map[fd.fileno()] = fd + else: + fd_map[fd] = fd + ls = [] + for fd, event in pollster.poll(timeout): + if event & select.POLLNVAL: + raise ValueError('invalid file descriptor %i' % fd) + ls.append(fd_map[fd]) + return ls + else: + def _poll(fds, timeout): # noqa + return select.select(fds, [], [], timeout)[0] + + def wait(object_list, timeout=None): # noqa + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. 
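+    If interrupted by a signal (EINTR) the poll is retried with the
+    timeout recomputed from the original deadline via five.monotonic().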
+ ''' + if timeout is not None: + if timeout <= 0: + return _poll(object_list, 0) + else: + deadline = monotonic() + timeout + while True: + try: + return _poll(object_list, timeout) + except OSError as e: + if e.errno != errno.EINTR: + raise + if timeout is not None: + timeout = deadline - monotonic() diff --git a/awx/lib/site-packages/billiard/_ext.py b/awx/lib/site-packages/billiard/_ext.py index 7d9caf01ae..fb2c055891 100644 --- a/awx/lib/site-packages/billiard/_ext.py +++ b/awx/lib/site-packages/billiard/_ext.py @@ -4,10 +4,7 @@ import sys supports_exec = True -try: - import _winapi as win32 -except ImportError: # pragma: no cover - win32 = None +from .compat import _winapi as win32 # noqa if sys.platform.startswith("java"): _billiard = None @@ -20,11 +17,9 @@ else: try: Connection = _billiard.Connection except AttributeError: # Py3 - from multiprocessing.connection import Connection # noqa + from billiard.connection import Connection # noqa PipeConnection = getattr(_billiard, "PipeConnection", None) - if win32 is None: - win32 = getattr(_billiard, "win32", None) # noqa def ensure_multiprocessing(): diff --git a/awx/lib/site-packages/billiard/_reduction.py b/awx/lib/site-packages/billiard/_reduction.py new file mode 100644 index 0000000000..7230287fae --- /dev/null +++ b/awx/lib/site-packages/billiard/_reduction.py @@ -0,0 +1,244 @@ +# +# Module to allow connection and socket objects to be transferred +# between processes +# +# multiprocessing/reduction.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +from __future__ import absolute_import + +__all__ = [] + +import os +import sys +import socket +import threading + +from pickle import Pickler + +from . import current_process +from ._ext import _billiard, win32 +from .util import register_after_fork, debug, sub_debug + +if not(sys.platform == 'win32' or hasattr(_billiard, 'recvfd')): + raise ImportError('pickling of connections not supported') + +close = win32.CloseHandle if sys.platform == 'win32' else os.close + +# globals set later +_listener = None +_lock = None +_cache = set() + +# +# ForkingPickler +# + + +class ForkingPickler(Pickler): # noqa + dispatch = Pickler.dispatch.copy() + + @classmethod + def register(cls, type, reduce): + def dispatcher(self, obj): + rv = reduce(obj) + self.save_reduce(obj=obj, *rv) + cls.dispatch[type] = dispatcher + + +def _reduce_method(m): # noqa + if m.__self__ is None: + return getattr, (m.__self__.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) +ForkingPickler.register(type(ForkingPickler.save), _reduce_method) + + +def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) +ForkingPickler.register(type(list.append), _reduce_method_descriptor) +ForkingPickler.register(type(int.__add__), _reduce_method_descriptor) + +try: + from functools import partial +except ImportError: + pass +else: + + def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) + + def _rebuild_partial(func, args, keywords): + return partial(func, *args, **keywords) + ForkingPickler.register(partial, _reduce_partial) + + +def dump(obj, file, protocol=None): + ForkingPickler(file, protocol).dump(obj) + +# +# Platform specific definitions +# + +if sys.platform == 'win32': + # XXX Should this subprocess import be here? 
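+    # (_subprocess is the Python 2 Windows helper module that Python 3.3
+    # replaced with _winapi.)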
+ import _subprocess # noqa + + def send_handle(conn, handle, destination_pid): + from .forking import duplicate + process_handle = win32.OpenProcess( + win32.PROCESS_ALL_ACCESS, False, destination_pid + ) + try: + new_handle = duplicate(handle, process_handle) + conn.send(new_handle) + finally: + close(process_handle) + + def recv_handle(conn): + return conn.recv() + +else: + def send_handle(conn, handle, destination_pid): # noqa + _billiard.sendfd(conn.fileno(), handle) + + def recv_handle(conn): # noqa + return _billiard.recvfd(conn.fileno()) + +# +# Support for a per-process server thread which caches pickled handles +# + + +def _reset(obj): + global _lock, _listener, _cache + for h in _cache: + close(h) + _cache.clear() + _lock = threading.Lock() + _listener = None + +_reset(None) +register_after_fork(_reset, _reset) + + +def _get_listener(): + global _listener + + if _listener is None: + _lock.acquire() + try: + if _listener is None: + from .connection import Listener + debug('starting listener and thread for sending handles') + _listener = Listener(authkey=current_process().authkey) + t = threading.Thread(target=_serve) + t.daemon = True + t.start() + finally: + _lock.release() + + return _listener + + +def _serve(): + from .util import is_exiting, sub_warning + + while 1: + try: + conn = _listener.accept() + handle_wanted, destination_pid = conn.recv() + _cache.remove(handle_wanted) + send_handle(conn, handle_wanted, destination_pid) + close(handle_wanted) + conn.close() + except: + if not is_exiting(): + sub_warning('thread for sharing handles raised exception', + exc_info=True) + +# +# Functions to be used for pickling/unpickling objects with handles +# + + +def reduce_handle(handle): + from .forking import Popen, duplicate + if Popen.thread_is_spawning(): + return (None, Popen.duplicate_for_child(handle), True) + dup_handle = duplicate(handle) + _cache.add(dup_handle) + sub_debug('reducing handle %d', handle) + return (_get_listener().address, dup_handle, False) + + +def rebuild_handle(pickled_data): + from .connection import Client + address, handle, inherited = pickled_data + if inherited: + return handle + sub_debug('rebuilding handle %d', handle) + conn = Client(address, authkey=current_process().authkey) + conn.send((handle, os.getpid())) + new_handle = recv_handle(conn) + conn.close() + return new_handle + +# +# Register `_billiard.Connection` with `ForkingPickler` +# + + +def reduce_connection(conn): + rh = reduce_handle(conn.fileno()) + return rebuild_connection, (rh, conn.readable, conn.writable) + + +def rebuild_connection(reduced_handle, readable, writable): + handle = rebuild_handle(reduced_handle) + return _billiard.Connection( + handle, readable=readable, writable=writable + ) + +# Register `socket.socket` with `ForkingPickler` +# + + +def fromfd(fd, family, type_, proto=0): + s = socket.fromfd(fd, family, type_, proto) + if s.__class__ is not socket.socket: + s = socket.socket(_sock=s) + return s + + +def reduce_socket(s): + reduced_handle = reduce_handle(s.fileno()) + return rebuild_socket, (reduced_handle, s.family, s.type, s.proto) + + +def rebuild_socket(reduced_handle, family, type_, proto): + fd = rebuild_handle(reduced_handle) + _sock = fromfd(fd, family, type_, proto) + close(fd) + return _sock + +ForkingPickler.register(socket.socket, reduce_socket) + +# +# Register `_billiard.PipeConnection` with `ForkingPickler` +# + +if sys.platform == 'win32': + + def reduce_pipe_connection(conn): + rh = reduce_handle(conn.fileno()) + return rebuild_pipe_connection, 
(rh, conn.readable, conn.writable) + + def rebuild_pipe_connection(reduced_handle, readable, writable): + handle = rebuild_handle(reduced_handle) + return _billiard.PipeConnection( + handle, readable=readable, writable=writable + ) diff --git a/awx/lib/site-packages/billiard/_reduction3.py b/awx/lib/site-packages/billiard/_reduction3.py new file mode 100644 index 0000000000..c6d9bd272d --- /dev/null +++ b/awx/lib/site-packages/billiard/_reduction3.py @@ -0,0 +1,249 @@ +# +# Module which deals with pickling of objects. +# +# multiprocessing/reduction.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# +from __future__ import absolute_import + +import copyreg +import functools +import io +import os +import pickle +import socket +import sys + +__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] + + +HAVE_SEND_HANDLE = (sys.platform == 'win32' or + (hasattr(socket, 'CMSG_LEN') and + hasattr(socket, 'SCM_RIGHTS') and + hasattr(socket.socket, 'sendmsg'))) + +# +# Pickler subclass +# + + +class ForkingPickler(pickle.Pickler): + '''Pickler subclass used by multiprocessing.''' + _extra_reducers = {} + _copyreg_dispatch_table = copyreg.dispatch_table + + def __init__(self, *args): + super().__init__(*args) + self.dispatch_table = self._copyreg_dispatch_table.copy() + self.dispatch_table.update(self._extra_reducers) + + @classmethod + def register(cls, type, reduce): + '''Register a reduce function for a type.''' + cls._extra_reducers[type] = reduce + + @classmethod + def dumps(cls, obj, protocol=None): + buf = io.BytesIO() + cls(buf, protocol).dump(obj) + return buf.getbuffer() + + loads = pickle.loads + +register = ForkingPickler.register + + +def dump(obj, file, protocol=None): + '''Replacement for pickle.dump() using ForkingPickler.''' + ForkingPickler(file, protocol).dump(obj) + +# +# Platform specific definitions +# + +if sys.platform == 'win32': + # Windows + __all__ += ['DupHandle', 'duplicate', 'steal_handle'] + import _winapi + + def duplicate(handle, target_process=None, inheritable=False): + '''Duplicate a handle. (target_process is a handle not a pid!)''' + if target_process is None: + target_process = _winapi.GetCurrentProcess() + return _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), handle, target_process, + 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) + + def steal_handle(source_pid, handle): + '''Steal a handle from process identified by source_pid.''' + source_process_handle = _winapi.OpenProcess( + _winapi.PROCESS_DUP_HANDLE, False, source_pid) + try: + return _winapi.DuplicateHandle( + source_process_handle, handle, + _winapi.GetCurrentProcess(), 0, False, + _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(source_process_handle) + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) + conn.send(dh) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + return conn.recv().detach() + + class DupHandle(object): + '''Picklable wrapper for a handle.''' + def __init__(self, handle, access, pid=None): + if pid is None: + # We just duplicate the handle in the current process and + # let the receiving process steal the handle. 
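+                # detach() in the receiving process later steals it with
+                # DUPLICATE_CLOSE_SOURCE, so the temporary duplicate is
+                # closed again automatically.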
+ pid = os.getpid() + proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) + try: + self._handle = _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), + handle, proc, access, False, 0) + finally: + _winapi.CloseHandle(proc) + self._access = access + self._pid = pid + + def detach(self): + '''Get the handle. This should only be called once.''' + # retrieve handle from process which currently owns it + if self._pid == os.getpid(): + # The handle has already been duplicated for this process. + return self._handle + # We must steal the handle from the process whose pid is self._pid. + proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, + self._pid) + try: + return _winapi.DuplicateHandle( + proc, self._handle, _winapi.GetCurrentProcess(), + self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(proc) + +else: + # Unix + __all__ += ['DupFd', 'sendfds', 'recvfds'] + import array + + # On MacOSX we should acknowledge receipt of fds -- see Issue14669 + ACKNOWLEDGE = sys.platform == 'darwin' + + def sendfds(sock, fds): + '''Send an array of fds over an AF_UNIX socket.''' + fds = array.array('i', fds) + msg = bytes([len(fds) % 256]) + sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + if ACKNOWLEDGE and sock.recv(1) != b'A': + raise RuntimeError('did not receive acknowledgement of fd') + + def recvfds(sock, size): + '''Receive an array of fds over an AF_UNIX socket.''' + a = array.array('i') + bytes_size = a.itemsize * size + msg, ancdata, flags, addr = sock.recvmsg( + 1, socket.CMSG_LEN(bytes_size), + ) + if not msg and not ancdata: + raise EOFError + try: + if ACKNOWLEDGE: + sock.send(b'A') + if len(ancdata) != 1: + raise RuntimeError( + 'received %d items of ancdata' % len(ancdata), + ) + cmsg_level, cmsg_type, cmsg_data = ancdata[0] + if (cmsg_level == socket.SOL_SOCKET and + cmsg_type == socket.SCM_RIGHTS): + if len(cmsg_data) % a.itemsize != 0: + raise ValueError + a.frombytes(cmsg_data) + assert len(a) % 256 == msg[0] + return list(a) + except (ValueError, IndexError): + pass + raise RuntimeError('Invalid data received') + + def send_handle(conn, handle, destination_pid): # noqa + '''Send a handle over a local connection.''' + fd = conn.fileno() + with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s: + sendfds(s, [handle]) + + def recv_handle(conn): # noqa + '''Receive a handle over a local connection.''' + fd = conn.fileno() + with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s: + return recvfds(s, 1)[0] + + def DupFd(fd): + '''Return a wrapper for an fd.''' + from .forking import Popen + return Popen.duplicate_for_child(fd) + +# +# Try making some callable types picklable +# + + +def _reduce_method(m): + if m.__self__ is None: + return getattr, (m.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) + + +class _C: + def f(self): + pass +register(type(_C().f), _reduce_method) + + +def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) +register(type(list.append), _reduce_method_descriptor) +register(type(int.__add__), _reduce_method_descriptor) + + +def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) + + +def _rebuild_partial(func, args, keywords): + return functools.partial(func, *args, **keywords) +register(functools.partial, _reduce_partial) + +# +# Make sockets picklable +# + +if sys.platform == 'win32': + + def _reduce_socket(s): + from .resource_sharer import DupSocket + return 
_rebuild_socket, (DupSocket(s),) + + def _rebuild_socket(ds): + return ds.detach() + register(socket.socket, _reduce_socket) + +else: + + def _reduce_socket(s): # noqa + df = DupFd(s.fileno()) + return _rebuild_socket, (df, s.family, s.type, s.proto) + + def _rebuild_socket(df, family, type, proto): # noqa + fd = df.detach() + return socket.socket(family, type, proto, fileno=fd) + register(socket.socket, _reduce_socket) diff --git a/awx/lib/site-packages/billiard/_win.py b/awx/lib/site-packages/billiard/_win.py index fd6db55532..dc0262e83f 100644 --- a/awx/lib/site-packages/billiard/_win.py +++ b/awx/lib/site-packages/billiard/_win.py @@ -88,7 +88,7 @@ def get_all_processes_pids(): def get_processtree_pids(pid, include_parent=True): """Return a list with all the pids of a process tree""" parents = get_all_processes_pids() - all_pids = parents.keys() + all_pids = list(parents.keys()) pids = set([pid]) while 1: pids_new = pids.copy() diff --git a/awx/lib/site-packages/billiard/common.py b/awx/lib/site-packages/billiard/common.py index 5c367fd879..93e5a6015e 100644 --- a/awx/lib/site-packages/billiard/common.py +++ b/awx/lib/site-packages/billiard/common.py @@ -4,10 +4,10 @@ This module contains utilities added by billiard, to keep "non-core" functionality out of ``.util``.""" from __future__ import absolute_import +import os import signal import sys -from time import time import pickle as pypickle try: import cPickle as cpickle @@ -15,6 +15,7 @@ except ImportError: # pragma: no cover cpickle = None # noqa from .exceptions import RestartFreqExceeded +from .five import monotonic if sys.version_info < (2, 6): # pragma: no cover # cPickle does not use absolute_imports @@ -36,16 +37,15 @@ else: except ImportError: from StringIO import StringIO as BytesIO # noqa +EX_SOFTWARE = 70 + TERMSIGS = ( 'SIGHUP', 'SIGQUIT', - 'SIGILL', 'SIGTRAP', 'SIGABRT', 'SIGEMT', - 'SIGFPE', 'SIGBUS', - 'SIGSEGV', 'SIGSYS', 'SIGPIPE', 'SIGALRM', @@ -58,13 +58,33 @@ TERMSIGS = ( 'SIGUSR2', ) +#: set by signal handlers just before calling exit. +#: if this is true after the sighandler returns it means that something +#: went wrong while terminating the process, and :func:`os._exit` +#: must be called ASAP. +_should_have_exited = [False] + def pickle_loads(s, load=pickle_load): # used to support buffer objects return load(BytesIO(s)) +def maybe_setsignal(signum, handler): + try: + signal.signal(signum, handler) + except (OSError, AttributeError, ValueError, RuntimeError): + pass + + def _shutdown_cleanup(signum, frame): + # we will exit here so if the signal is received a second time + # we can be sure that something is very wrong and we may be in + # a crashing loop. + if _should_have_exited[0]: + os._exit(EX_SOFTWARE) + maybe_setsignal(signum, signal.SIG_DFL) + _should_have_exited[0] = True sys.exit(-(256 - signum)) @@ -72,11 +92,12 @@ def reset_signals(handler=_shutdown_cleanup): for sig in TERMSIGS: try: signum = getattr(signal, sig) + except AttributeError: + pass + else: current = signal.getsignal(signum) if current is not None and current != signal.SIG_IGN: - signal.signal(signum, handler) - except (OSError, AttributeError, ValueError, RuntimeError): - pass + maybe_setsignal(signum, handler) class restart_state(object): @@ -87,7 +108,7 @@ class restart_state(object): self.R, self.T = 0, None def step(self, now=None): - now = time() if now is None else now + now = monotonic() if now is None else now R = self.R if self.T and now - self.T >= self.maxT: # maxT passed, reset counter and time passed. 
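        # Usage sketch (assuming the usual restart_state(maxR, maxT)
        # constructor; the 5-restarts-per-60s budget is illustrative
        # only):
        #
        #     state = restart_state(maxR=5, maxT=60.0)
        #     try:
        #         state.step()   # record one restart (monotonic clock)
        #     except state.RestartFreqExceeded:
        #         ...            # restarting too fast: back off/give up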
@@ -98,9 +119,8 @@ class restart_state(object): # the startup probably went fine (startup restart burst # protection) if self.R: # pragma: no cover - pass - self.R = 0 # reset in case someone catches the error - raise self.RestartFreqExceeded("%r in %rs" % (R, self.maxT)) + self.R = 0 # reset in case someone catches the error + raise self.RestartFreqExceeded("%r in %rs" % (R, self.maxT)) # first run sets T if self.T is None: self.T = now diff --git a/awx/lib/site-packages/billiard/compat.py b/awx/lib/site-packages/billiard/compat.py index ac31f3b84b..113c121c6f 100644 --- a/awx/lib/site-packages/billiard/compat.py +++ b/awx/lib/site-packages/billiard/compat.py @@ -3,13 +3,50 @@ from __future__ import absolute_import import errno import os import sys -import __builtin__ + +from .five import builtins, range + +if sys.platform == 'win32': + try: + import _winapi # noqa + except ImportError: # pragma: no cover + try: + from _billiard import win32 as _winapi # noqa + except (ImportError, AttributeError): + from _multiprocessing import win32 as _winapi # noqa +else: + _winapi = None # noqa + + +try: + buf_t, is_new_buffer = memoryview, True # noqa +except NameError: # Py2.6 + buf_t, is_new_buffer = buffer, False # noqa + +if hasattr(os, 'write'): + __write__ = os.write + + if is_new_buffer: + + def send_offset(fd, buf, offset): + return __write__(fd, buf[offset:]) + + else: # Py2.6 + + def send_offset(fd, buf, offset): # noqa + return __write__(fd, buf_t(buf, offset)) + +else: # non-posix platform + + def send_offset(fd, buf, offset): # noqa + raise NotImplementedError('send_offset') + if sys.version_info[0] == 3: bytes = bytes else: try: - _bytes = __builtin__.bytes + _bytes = builtins.bytes except AttributeError: _bytes = str @@ -25,10 +62,10 @@ try: except AttributeError: def closerange(fd_low, fd_high): # noqa - for fd in reversed(xrange(fd_low, fd_high)): + for fd in reversed(range(fd_low, fd_high)): try: os.close(fd) - except OSError, exc: + except OSError as exc: if exc.errno != errno.EBADF: raise @@ -46,3 +83,26 @@ def get_errno(exc): except AttributeError: pass return 0 + + +if sys.platform == 'win32': + + def setblocking(handle, blocking): + raise NotImplementedError('setblocking not implemented on win32') + + def isblocking(handle): + raise NotImplementedError('isblocking not implemented on win32') + +else: + from os import O_NONBLOCK + from fcntl import fcntl, F_GETFL, F_SETFL + + def isblocking(handle): # noqa + return not (fcntl(handle, F_GETFL) & O_NONBLOCK) + + def setblocking(handle, blocking): # noqa + flags = fcntl(handle, F_GETFL, 0) + fcntl( + handle, F_SETFL, + flags & (~O_NONBLOCK) if blocking else flags | O_NONBLOCK, + ) diff --git a/awx/lib/site-packages/billiard/connection.py b/awx/lib/site-packages/billiard/connection.py index 58e9a2ffa3..dda3ee5cc5 100644 --- a/awx/lib/site-packages/billiard/connection.py +++ b/awx/lib/site-packages/billiard/connection.py @@ -1,11 +1,27 @@ from __future__ import absolute_import - import sys +is_pypy = hasattr(sys, 'pypy_version_info') + if sys.version_info[0] == 3: - from multiprocessing import connection + from . import _connection3 as connection else: - from billiard import _connection as connection # noqa + from . 
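A quick illustration of the isblocking()/setblocking() helpers defined above, assuming a POSIX platform (on win32 they raise NotImplementedError): a pipe starts out blocking, and the helpers flip the O_NONBLOCK flag in place.

import os

r, w = os.pipe()
assert isblocking(r)      # no O_NONBLOCK flag yet
setblocking(r, False)     # sets O_NONBLOCK
assert not isblocking(r)
setblocking(r, True)      # clears O_NONBLOCK again
assert isblocking(r)
os.close(r)
os.close(w)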
import _connection as connection # noqa + + +if is_pypy: + import _multiprocessing + from .compat import setblocking, send_offset + + class Connection(_multiprocessing.Connection): + + def send_offset(self, buf, offset): + return send_offset(self.fileno(), buf, offset) + + def setblocking(self, blocking): + setblocking(self.fileno(), blocking) + _multiprocessing.Connection = Connection + sys.modules[__name__] = connection diff --git a/awx/lib/site-packages/billiard/dummy/__init__.py b/awx/lib/site-packages/billiard/dummy/__init__.py index 96b996fdda..369b6a81d0 100644 --- a/awx/lib/site-packages/billiard/dummy/__init__.py +++ b/awx/lib/site-packages/billiard/dummy/__init__.py @@ -50,12 +50,10 @@ import array from threading import Lock, RLock, Semaphore, BoundedSemaphore from threading import Event -from Queue import Queue -if sys.version_info[0] == 3: - from multiprocessing.connection import Pipe -else: - from billiard._connection import Pipe +from billiard.five import Queue + +from billiard.connection import Pipe class DummyProcess(threading.Thread): @@ -91,7 +89,7 @@ class Condition(_Condition): if sys.version_info[0] == 3: notify_all = _Condition.notifyAll else: - notify_all = _Condition.notifyAll.im_func + notify_all = _Condition.notifyAll.__func__ Process = DummyProcess @@ -117,7 +115,7 @@ class Namespace(object): self.__dict__.update(kwds) def __repr__(self): - items = self.__dict__.items() + items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): diff --git a/awx/lib/site-packages/billiard/dummy/connection.py b/awx/lib/site-packages/billiard/dummy/connection.py index 62f8ae510f..46502ee31f 100644 --- a/awx/lib/site-packages/billiard/dummy/connection.py +++ b/awx/lib/site-packages/billiard/dummy/connection.py @@ -35,7 +35,7 @@ from __future__ import absolute_import __all__ = ['Client', 'Listener', 'Pipe'] -from Queue import Queue +from billiard.five import Queue families = [None] diff --git a/awx/lib/site-packages/billiard/einfo.py b/awx/lib/site-packages/billiard/einfo.py index 3b6848cca5..e761620d66 100644 --- a/awx/lib/site-packages/billiard/einfo.py +++ b/awx/lib/site-packages/billiard/einfo.py @@ -32,7 +32,7 @@ class _Frame(object): class _Object(object): def __init__(self, **kw): - [setattr(self, k, v) for k, v in kw.iteritems()] + [setattr(self, k, v) for k, v in kw.items()] class _Truncated(object): diff --git a/awx/lib/site-packages/billiard/five.py b/awx/lib/site-packages/billiard/five.py new file mode 100644 index 0000000000..1eeded46db --- /dev/null +++ b/awx/lib/site-packages/billiard/five.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +""" + celery.five + ~~~~~~~~~~~ + + Compatibility implementations of features + only available in newer Python versions. 
+ + +""" +from __future__ import absolute_import + +############## py3k ######################################################### +import sys +PY3 = sys.version_info[0] == 3 + +try: + reload = reload # noqa +except NameError: # pragma: no cover + from imp import reload # noqa + +try: + from UserList import UserList # noqa +except ImportError: # pragma: no cover + from collections import UserList # noqa + +try: + from UserDict import UserDict # noqa +except ImportError: # pragma: no cover + from collections import UserDict # noqa + +############## time.monotonic ################################################ + +if sys.version_info < (3, 3): + + import platform + SYSTEM = platform.system() + + if SYSTEM == 'Darwin': + import ctypes + libSystem = ctypes.CDLL('libSystem.dylib') + CoreServices = ctypes.CDLL( + '/System/Library/Frameworks/CoreServices.framework/CoreServices', + use_errno=True, + ) + mach_absolute_time = libSystem.mach_absolute_time + mach_absolute_time.restype = ctypes.c_uint64 + absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds + absolute_to_nanoseconds.restype = ctypes.c_uint64 + absolute_to_nanoseconds.argtypes = [ctypes.c_uint64] + + def _monotonic(): + return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9 + + elif SYSTEM == 'Linux': + # from stackoverflow: + # questions/1205722/how-do-i-get-monotonic-time-durations-in-python + import ctypes + import os + + CLOCK_MONOTONIC = 1 # see + + class timespec(ctypes.Structure): + _fields_ = [ + ('tv_sec', ctypes.c_long), + ('tv_nsec', ctypes.c_long), + ] + + librt = ctypes.CDLL('librt.so.1', use_errno=True) + clock_gettime = librt.clock_gettime + clock_gettime.argtypes = [ + ctypes.c_int, ctypes.POINTER(timespec), + ] + + def _monotonic(): # noqa + t = timespec() + if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0: + errno_ = ctypes.get_errno() + raise OSError(errno_, os.strerror(errno_)) + return t.tv_sec + t.tv_nsec * 1e-9 + else: + from time import time as _monotonic + +try: + from time import monotonic +except ImportError: + monotonic = _monotonic # noqa + +if PY3: + import builtins + + from queue import Queue, Empty, Full + from itertools import zip_longest + from io import StringIO, BytesIO + + map = map + string = str + string_t = str + long_t = int + text_t = str + range = range + int_types = (int, ) + + open_fqdn = 'builtins.open' + + def items(d): + return d.items() + + def keys(d): + return d.keys() + + def values(d): + return d.values() + + def nextfun(it): + return it.__next__ + + exec_ = getattr(builtins, 'exec') + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + class WhateverIO(StringIO): + + def write(self, data): + if isinstance(data, bytes): + data = data.encode() + StringIO.write(self, data) + +else: + import __builtin__ as builtins # noqa + from Queue import Queue, Empty, Full # noqa + from itertools import imap as map, izip_longest as zip_longest # noqa + from StringIO import StringIO # noqa + string = unicode # noqa + string_t = basestring # noqa + text_t = unicode + long_t = long # noqa + range = xrange + int_types = (int, long) + + open_fqdn = '__builtin__.open' + + def items(d): # noqa + return d.iteritems() + + def keys(d): # noqa + return d.iterkeys() + + def values(d): # noqa + return d.itervalues() + + def nextfun(it): # noqa + return it.next + + def exec_(code, globs=None, locs=None): + """Execute code in a namespace.""" + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs 
is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""") + + BytesIO = WhateverIO = StringIO # noqa + + +def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])): + """Class decorator to set metaclass. + + Works with both Python 2 and Python 3 and it does not add + an extra class in the lookup order like ``six.with_metaclass`` does + (that is -- it copies the original class instead of using inheritance). + + """ + + def _clone_with_metaclass(Class): + attrs = dict((key, value) for key, value in items(vars(Class)) + if key not in skip_attrs) + return Type(Class.__name__, Class.__bases__, attrs) + + return _clone_with_metaclass diff --git a/awx/lib/site-packages/billiard/forking.py b/awx/lib/site-packages/billiard/forking.py index 1f1d311dd5..4557359cab 100644 --- a/awx/lib/site-packages/billiard/forking.py +++ b/awx/lib/site-packages/billiard/forking.py @@ -14,12 +14,15 @@ import sys import signal import warnings -from ._ext import Connection, PipeConnection, win32 from pickle import load, HIGHEST_PROTOCOL -from billiard import util, process +from billiard import util +from billiard import process +from billiard.five import int_types +from .reduction import dump +from .compat import _winapi as win32 __all__ = ['Popen', 'assert_spawning', 'exit', - 'duplicate', 'close', 'ForkingPickler'] + 'duplicate', 'close'] try: WindowsError = WindowsError # noqa @@ -53,105 +56,16 @@ def assert_spawning(self): ' through inheritance' % type(self).__name__ ) -# -# Try making some callable types picklable -# -from pickle import Pickler - -if sys.version_info[0] == 3: - from copyreg import dispatch_table - - class ForkingPickler(Pickler): - _extra_reducers = {} - - def __init__(self, *args, **kwargs): - Pickler.__init__(self, *args, **kwargs) - self.dispatch_table = dispatch_table.copy() - self.dispatch_table.update(self._extra_reducers) - - @classmethod - def register(cls, type, reduce): - cls._extra_reducers[type] = reduce - - def _reduce_method(m): - if m.__self__ is None: - return getattr, (m.__class__, m.__func__.__name__) - else: - return getattr, (m.__self__, m.__func__.__name__) - - class _C: - def f(self): - pass - ForkingPickler.register(type(_C().f), _reduce_method) - -else: - - class ForkingPickler(Pickler): # noqa - dispatch = Pickler.dispatch.copy() - - @classmethod - def register(cls, type, reduce): - def dispatcher(self, obj): - rv = reduce(obj) - self.save_reduce(obj=obj, *rv) - cls.dispatch[type] = dispatcher - - def _reduce_method(m): # noqa - if m.im_self is None: - return getattr, (m.im_class, m.im_func.func_name) - else: - return getattr, (m.im_self, m.im_func.func_name) - ForkingPickler.register(type(ForkingPickler.save), _reduce_method) - - -def _reduce_method_descriptor(m): - return getattr, (m.__objclass__, m.__name__) -ForkingPickler.register(type(list.append), _reduce_method_descriptor) -ForkingPickler.register(type(int.__add__), _reduce_method_descriptor) - -try: - from functools import partial -except ImportError: - pass -else: - - def _reduce_partial(p): - return _rebuild_partial, (p.func, p.args, p.keywords or {}) - - def _rebuild_partial(func, args, keywords): - return partial(func, *args, **keywords) - ForkingPickler.register(partial, _reduce_partial) - - -def dump(obj, file, protocol=None): - ForkingPickler(file, protocol).dump(obj) - -# -# Make (Pipe)Connection picklable -# - - -def reduce_connection(conn): - # XXX check not
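A usage sketch for the with_metaclass() helper above: the decorated class is rebuilt through the given metaclass instead of gaining an extra base class, so the same code works on Python 2 and Python 3.

class Registry(type):
    classes = []

    def __new__(mcs, name, bases, attrs):
        cls = super(Registry, mcs).__new__(mcs, name, bases, attrs)
        mcs.classes.append(cls)  # record every class built by this metaclass
        return cls

@with_metaclass(Registry)
class Plugin(object):
    pass

assert type(Plugin) is Registry
assert Plugin in Registry.classes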
necessary since only registered with ForkingPickler - if not Popen.thread_is_spawning(): - raise RuntimeError( - 'By default %s objects can only be shared between processes\n' - 'using inheritance' % type(conn).__name__ - ) - return type(conn), (Popen.duplicate_for_child(conn.fileno()), - conn.readable, conn.writable) - -ForkingPickler.register(Connection, reduce_connection) -if PipeConnection: - ForkingPickler.register(PipeConnection, reduce_connection) - # # Unix # if sys.platform != 'win32': - import thread + try: + import thread + except ImportError: + import _thread as thread # noqa import select WINEXE = False @@ -172,6 +86,8 @@ if sys.platform != 'win32': _tls = thread._local() def __init__(self, process_obj): + # register reducers + from billiard import connection # noqa _Django_old_layout_hack__save() sys.stdout.flush() sys.stderr.flush() @@ -265,9 +181,15 @@ if sys.platform != 'win32': # else: - import thread + try: + import thread + except ImportError: + import _thread as thread # noqa import msvcrt - import _subprocess + try: + import _subprocess + except ImportError: + import _winapi as _subprocess # noqa # # @@ -287,10 +209,14 @@ else: def duplicate(handle, target_process=None, inheritable=False): if target_process is None: target_process = _subprocess.GetCurrentProcess() - return _subprocess.DuplicateHandle( + h = _subprocess.DuplicateHandle( _subprocess.GetCurrentProcess(), handle, target_process, 0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS - ).Detach() + ) + if sys.version_info[0] < 3 or ( + sys.version_info[0] == 3 and sys.version_info[1] < 3): + h = h.Detach() + return h # # We define a Popen class similar to the one from subprocess, but @@ -318,8 +244,9 @@ else: hp, ht, pid, tid = _subprocess.CreateProcess( _python_exe, cmd, None, None, 1, 0, None, None, None ) - ht.Close() - close(rhandle) + close(ht) if isinstance(ht, int_types) else ht.Close() + (close(rhandle) if isinstance(rhandle, int_types) + else rhandle.Close()) # set attributes of self self.pid = pid @@ -566,22 +493,6 @@ def get_preparation_data(name): return d - # - # Make (Pipe)Connection picklable - # - - def reduce_connection(conn): - if not Popen.thread_is_spawning(): - raise RuntimeError( - 'By default %s objects can only be shared between processes\n' - 'using inheritance' % type(conn).__name__ - ) - return type(conn), (Popen.duplicate_for_child(conn.fileno()), - conn.readable, conn.writable) - - ForkingPickler.register(Connection, reduce_connection) - ForkingPickler.register(PipeConnection, reduce_connection) - # # Prepare current process # @@ -659,7 +570,7 @@ def prepare(data): # Try to make the potentially picklable objects in # sys.modules['__main__'] realize they are in the main # module -- somewhat ugly. 
- for obj in main_module.__dict__.values(): + for obj in list(main_module.__dict__.values()): try: if obj.__module__ == '__parents_main__': obj.__module__ = '__main__' diff --git a/awx/lib/site-packages/billiard/heap.py b/awx/lib/site-packages/billiard/heap.py index bbc0bfc809..027a050559 100644 --- a/awx/lib/site-packages/billiard/heap.py +++ b/awx/lib/site-packages/billiard/heap.py @@ -17,7 +17,8 @@ import itertools from ._ext import _billiard, win32 from .util import Finalize, info, get_temp_dir -from .forking import assert_spawning, ForkingPickler +from .forking import assert_spawning +from .reduction import ForkingPickler __all__ = ['BufferWrapper'] @@ -38,7 +39,7 @@ if sys.platform == 'win32': def __init__(self, size): self.size = size - self.name = 'pym-%d-%d' % (os.getpid(), Arena._counter.next()) + self.name = 'pym-%d-%d' % (os.getpid(), next(Arena._counter)) self.buffer = mmap.mmap(-1, self.size, tagname=self.name) assert win32.GetLastError() == 0, 'tagname already in use' self._state = (self.size, self.name) @@ -65,9 +66,9 @@ else: if fileno == -1 and not _forking_is_enabled: name = os.path.join( get_temp_dir(), - 'pym-%d-%d' % (os.getpid(), self._counter.next())) + 'pym-%d-%d' % (os.getpid(), next(self._counter))) self.fileno = os.open( - name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0600) + name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o600) os.unlink(name) os.ftruncate(self.fileno, size) self.buffer = mmap.mmap(self.fileno, self.size) diff --git a/awx/lib/site-packages/billiard/managers.py b/awx/lib/site-packages/billiard/managers.py index a116723c5e..e9ac30abe3 100644 --- a/awx/lib/site-packages/billiard/managers.py +++ b/awx/lib/site-packages/billiard/managers.py @@ -8,7 +8,6 @@ # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import -from __future__ import with_statement __all__ = ['BaseManager', 'SyncManager', 'BaseProxy', 'Token'] @@ -19,14 +18,15 @@ __all__ = ['BaseManager', 'SyncManager', 'BaseProxy', 'Token'] import sys import threading import array -import Queue +from collections import Callable from traceback import format_exc -from time import time as _time from . import Process, current_process, active_children, Pool, util, connection +from .five import Queue, items, monotonic from .process import AuthenticationString -from .forking import exit, Popen, ForkingPickler +from .forking import exit, Popen +from .reduction import ForkingPickler from .util import Finalize, error, info # @@ -123,7 +123,7 @@ def all_methods(obj): temp = [] for name in dir(obj): func = getattr(obj, name) - if callable(func): + if isinstance(func, Callable): temp.append(name) return temp @@ -205,14 +205,14 @@ class Server(object): msg = ('#RETURN', result) try: c.send(msg) - except Exception, e: + except Exception as exc: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass info('Failure to send message: %r', msg) info(' ... request was %r', request) - info(' ... exception was %r', e) + info(' ... exception was %r', exc) c.close() @@ -245,8 +245,8 @@ class Server(object): try: res = function(*args, **kwds) - except Exception, e: - msg = ('#ERROR', e) + except Exception as exc: + msg = ('#ERROR', exc) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: @@ -280,13 +280,13 @@ class Server(object): try: try: send(msg) - except Exception, e: + except Exception: send(('#UNSERIALIZABLE', repr(msg))) - except Exception, e: + except Exception as exc: info('exception in thread serving %r', threading.currentThread().name) info(' ... 
message was %r', msg) - info(' ... exception was %r', e) + info(' ... exception was %r', exc) conn.close() sys.exit(1) @@ -314,7 +314,7 @@ class Server(object): ''' with self.mutex: result = [] - keys = self.id_to_obj.keys() + keys = list(self.id_to_obj.keys()) keys.sort() for ident in keys: if ident != '0': @@ -492,7 +492,8 @@ class BaseManager(object): ''' assert self._state.value == State.INITIAL - if initializer is not None and not callable(initializer): + if initializer is not None and \ + not isinstance(initializer, Callable): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server @@ -641,7 +642,7 @@ class BaseManager(object): ) if method_to_typeid: - for key, value in method_to_typeid.items(): + for key, value in items(method_to_typeid): assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value @@ -797,8 +798,8 @@ class BaseProxy(object): util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) - except Exception, e: - util.debug('... decref failed %s', e) + except Exception as exc: + util.debug('... decref failed %s', exc) else: util.debug('DECREF %r -- manager already shutdown', token.id) @@ -815,9 +816,9 @@ class BaseProxy(object): self._manager = None try: self._incref() - except Exception, e: + except Exception as exc: # the proxy may just be for a manager which has shutdown - info('incref failed: %s', e) + info('incref failed: %s', exc) def __reduce__(self): kwds = {} @@ -933,7 +934,7 @@ class Namespace(object): self.__dict__.update(kwds) def __repr__(self): - items = self.__dict__.items() + items = list(self.__dict__.items()) temp = [] for name, value in items: if not name.startswith('_'): @@ -1026,13 +1027,13 @@ class ConditionProxy(AcquirerProxy): if result: return result if timeout is not None: - endtime = _time() + timeout + endtime = monotonic() + timeout else: endtime = None waittime = None while not result: if endtime is not None: - waittime = endtime - _time() + waittime = endtime - monotonic() if waittime <= 0: break self.wait(waittime) @@ -1149,8 +1150,8 @@ class SyncManager(BaseManager): this class. ''' -SyncManager.register('Queue', Queue.Queue) -SyncManager.register('JoinableQueue', Queue.Queue) +SyncManager.register('Queue', Queue) +SyncManager.register('JoinableQueue', Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) diff --git a/awx/lib/site-packages/billiard/pool.py b/awx/lib/site-packages/billiard/pool.py index 33e0fe0d29..536787f8c1 100644 --- a/awx/lib/site-packages/billiard/pool.py +++ b/awx/lib/site-packages/billiard/pool.py @@ -8,13 +8,11 @@ # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import -from __future__ import with_statement # # Imports # -import collections import errno import itertools import os @@ -23,13 +21,15 @@ import signal import sys import threading import time -import Queue import warnings +from collections import Callable, deque +from functools import partial + from . import Event, Process, cpu_count from . 
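A usage sketch for the SyncManager registrations above, assuming billiard exposes Manager() the way multiprocessing does: each registered type lives in the manager's server process and is reached through a proxy, so a Queue can be shared safely between processes.

from billiard import Manager

manager = Manager()
queue = manager.Queue()  # proxy for the Queue type registered above
queue.put('job-1')
assert queue.get() == 'job-1'
manager.shutdown()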
import util from .common import pickle_loads, reset_signals, restart_state -from .compat import get_errno +from .compat import get_errno, send_offset from .einfo import ExceptionInfo from .exceptions import ( CoroStop, @@ -40,8 +40,11 @@ from .exceptions import ( TimeoutError, WorkerLostError, ) +from .five import Empty, Queue, range, values, reraise, monotonic from .util import Finalize, debug +PY3 = sys.version_info[0] == 3 + if platform.system() == 'Windows': # pragma: no cover # On Windows os.kill calls TerminateProcess which cannot be # handled by # any process, so this is needed to terminate the task @@ -51,30 +54,22 @@ else: from os import kill as _kill # noqa -try: - next = next -except NameError: - def next(it, *args): # noqa - try: - return it.next() - except StopIteration: - if not args: - raise - return args[0] - -PY3 = sys.version_info[0] == 3 - try: TIMEOUT_MAX = threading.TIMEOUT_MAX except AttributeError: # pragma: no cover TIMEOUT_MAX = 1e10 # noqa -if PY3: +if sys.version_info >= (3, 3): _Semaphore = threading.Semaphore else: + # Semaphore is a factory function pointing to _Semaphore _Semaphore = threading._Semaphore # noqa +SIGMAP = dict( + (getattr(signal, n), n) for n in dir(signal) if n.startswith('SIG') +) + # # Constants representing the state of a pool # @@ -89,6 +84,8 @@ TERMINATE = 2 ACK = 0 READY = 1 +TASK = 2 +NACK = 3 # # Exit code constants @@ -110,9 +107,30 @@ EX_OK = getattr(os, "EX_OK", 0) job_counter = itertools.count() +Lock = threading.Lock + + +def _get_send_offset(connection): + try: + native = connection.send_offset + except AttributeError: + native = None + if native is None: + return partial(send_offset, connection.fileno()) + return native + + +def human_status(status): + if status < 0: + try: + return 'signal {0} ({1})'.format(-status, SIGMAP[-status]) + except KeyError: + return 'signal {0}'.format(-status) + return 'exitcode {0}'.format(status) + def mapstar(args): - return map(*args) + return list(map(*args)) def starmapstar(args): @@ -213,102 +231,216 @@ def soft_timeout_sighandler(signum, frame): # -def worker(inqueue, outqueue, initializer=None, initargs=(), - maxtasks=None, sentinel=None): - pid = os.getpid() - assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0) - put = outqueue.put - get = inqueue.get - loads = pickle_loads +class Worker(Process): + _controlled_termination = False + _job_terminated = False - if hasattr(inqueue, '_reader'): + def __init__(self, inq, outq, synq=None, initializer=None, initargs=(), + maxtasks=None, sentinel=None, on_exit=None): + assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0) + self.initializer = initializer + self.initargs = initargs + self.maxtasks = maxtasks + self._shutdown = sentinel + self.on_exit = on_exit + self.inq, self.outq, self.synq = inq, outq, synq + self._make_shortcuts() - if hasattr(inqueue, 'get_payload') and inqueue.get_payload: - get_payload = inqueue.get_payload + super(Worker, self).__init__() - def poll(timeout): - if inqueue._reader.poll(timeout): - return True, loads(get_payload()) - return False, None + def __reduce__(self): + return self.__class__, ( + self.inq, self.outq, self.synq, self.initializer, + self.initargs, self.maxtasks, self._shutdown, + ) + + def _make_shortcuts(self): + self.inqW_fd = self.inq._writer.fileno() # inqueue write fd + self.outqR_fd = self.outq._reader.fileno() # outqueue read fd + if self.synq: + self.synqR_fd = self.synq._reader.fileno() # synqueue read fd + self.synqW_fd = self.synq._writer.fileno() # synqueue 
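For example, human_status() above renders a worker's Process.exitcode for log messages: negative values mean death by signal (resolved through SIGMAP where possible), non-negative values are ordinary exit codes. Assuming POSIX signal numbers:

import signal

assert human_status(0) == 'exitcode 0'
assert human_status(-signal.SIGKILL) == 'signal 9 (SIGKILL)'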
write fd + self.send_syn_offset = _get_send_offset(self.synq._writer) else: - def poll(timeout): - if inqueue._reader.poll(timeout): - return True, get() - return False, None - else: + self.synqR_fd = self.synqW_fd = self._send_syn_offset = None + self._quick_put = self.inq._writer.send + self._quick_get = self.outq._reader.recv + self.send_job_offset = _get_send_offset(self.inq._writer) - def poll(timeout): # noqa - try: - return True, get(timeout=timeout) - except Queue.Empty: - return False, None + def run(self): + _exit = sys.exit + _exitcode = [None] - if hasattr(inqueue, '_writer'): - inqueue._writer.close() - outqueue._reader.close() + def exit(status=None): + _exitcode[0] = status + return _exit() + sys.exit = exit - if initializer is not None: - initializer(*initargs) + pid = os.getpid() - # Make sure all exiting signals call finally: blocks. - # this is important for the semaphore to be released. - reset_signals() + self._make_child_methods() + self.after_fork() + self.on_loop_start(pid=pid) # callback on loop start + try: + sys.exit(self.workloop(pid=pid)) + except Exception as exc: + error('Pool process %r error: %r', self, exc, exc_info=1) + self._do_exit(pid, _exitcode[0], exc) + finally: + self._do_exit(pid, _exitcode[0], None) - # install signal handler for soft timeouts. - if SIG_SOFT_TIMEOUT is not None: - signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler) + def _do_exit(self, pid, exitcode, exc=None): + if exitcode is None: + exitcode = EX_FAILURE if exc else EX_OK - try: - signal.signal(signal.SIGINT, signal.SIG_IGN) - except (AttributeError): + if self.on_exit is not None: + self.on_exit(pid, exitcode) + os._exit(exitcode) + + def on_loop_start(self, pid): pass - exitcode = None - completed = 0 - while maxtasks is None or (maxtasks and completed < maxtasks): - if sentinel is not None and sentinel.is_set(): - debug('worker got sentinel -- exiting') - exitcode = EX_OK - break + def terminate_controlled(self): + self._controlled_termination = True + self.terminate() + + def prepare_result(self, result): + return result + + def workloop(self, debug=debug, now=monotonic, pid=None): + pid = pid or os.getpid() + put = self.outq.put + inqW_fd = self.inqW_fd + synqW_fd = self.synqW_fd + maxtasks = self.maxtasks + prepare_result = self.prepare_result + + wait_for_job = self.wait_for_job + _wait_for_syn = self.wait_for_syn + + def wait_for_syn(jid): + i = 0 + while 1: + if i > 60: + error('!!!WAIT FOR ACK TIMEOUT: job:%r fd:%r!!!', + jid, self.synq._reader.fileno(), exc_info=1) + req = _wait_for_syn() + if req: + type_, args = req + if type_ == NACK: + return False + assert type_ == ACK + return True + i += 1 + + completed = 0 + while maxtasks is None or (maxtasks and completed < maxtasks): + req = wait_for_job() + if req: + type_, args_ = req + assert type_ == TASK + job, i, fun, args, kwargs = args_ + put((ACK, (job, i, now(), pid, synqW_fd))) + if _wait_for_syn: + confirm = wait_for_syn(job) + if not confirm: + continue # received NACK + try: + result = (True, prepare_result(fun(*args, **kwargs))) + except Exception: + result = (False, ExceptionInfo()) + try: + put((READY, (job, i, result, inqW_fd))) + except Exception as exc: + _, _, tb = sys.exc_info() + try: + wrapped = MaybeEncodingError(exc, result[1]) + einfo = ExceptionInfo(( + MaybeEncodingError, wrapped, tb, + )) + put((READY, (job, i, (False, einfo), inqW_fd))) + finally: + del(tb) + completed += 1 + debug('worker exiting after %d tasks', completed) + if maxtasks: + return EX_RECYCLE if completed == maxtasks 
else EX_FAILURE + return EX_OK + + def after_fork(self): + if hasattr(self.inq, '_writer'): + self.inq._writer.close() + if hasattr(self.outq, '_reader'): + self.outq._reader.close() + + if self.initializer is not None: + self.initializer(*self.initargs) + + # Make sure all exiting signals call finally: blocks. + # This is important for the semaphore to be released. + reset_signals() + + # install signal handler for soft timeouts. + if SIG_SOFT_TIMEOUT is not None: + signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler) try: - ready, task = poll(1.0) - if not ready: - continue - except (EOFError, IOError), exc: - if get_errno(exc) == errno.EINTR: - continue # interrupted, maybe by gdb - debug('worker got EOFError or IOError -- exiting') - exitcode = EX_FAILURE - break + signal.signal(signal.SIGINT, signal.SIG_IGN) + except AttributeError: + pass - if task is None: - debug('worker got sentinel -- exiting') - exitcode = EX_OK - break + def _make_recv_method(self, conn): + get = conn.get - job, i, func, args, kwds = task - put((ACK, (job, i, time.time(), pid))) - try: - result = (True, func(*args, **kwds)) - except Exception: - result = (False, ExceptionInfo()) - try: - put((READY, (job, i, result))) - except Exception, exc: - _, _, tb = sys.exc_info() + if hasattr(conn, '_reader'): + _poll = conn._reader.poll + if hasattr(conn, 'get_payload') and conn.get_payload: + get_payload = conn.get_payload + + def _recv(timeout, loads=pickle_loads): + return True, loads(get_payload()) + else: + def _recv(timeout): # noqa + if _poll(timeout): + return True, get() + return False, None + else: + def _recv(timeout): # noqa + try: + return True, get(timeout=timeout) + except Empty: + return False, None + return _recv + + def _make_child_methods(self, loads=pickle_loads): + self.wait_for_job = self._make_protected_receive(self.inq) + self.wait_for_syn = (self._make_protected_receive(self.synq) + if self.synq else None) + + def _make_protected_receive(self, conn): + _receive = self._make_recv_method(conn) + should_shutdown = self._shutdown.is_set if self._shutdown else None + + def receive(debug=debug): + if should_shutdown and should_shutdown(): + debug('worker got sentinel -- exiting') + raise SystemExit(EX_OK) try: - wrapped = MaybeEncodingError(exc, result[1]) - einfo = ExceptionInfo((MaybeEncodingError, wrapped, tb)) - put((READY, (job, i, (False, einfo)))) - finally: - del(tb) + ready, req = _receive(1.0) + if not ready: + return None + except (EOFError, IOError) as exc: + if get_errno(exc) == errno.EINTR: + return None # interrupted, maybe by gdb + debug('worker got %s -- exiting', type(exc).__name__) + raise SystemExit(EX_FAILURE) + if req is None: + debug('worker got sentinel -- exiting') + raise SystemExit(EX_FAILURE) + return req + + return receive - completed += 1 - debug('worker exiting after %d tasks', completed) - if exitcode is None and maxtasks: - exitcode = EX_RECYCLE if completed == maxtasks else EX_FAILURE - sys.exit(exitcode or EX_OK) # # Class representing a process pool @@ -326,14 +458,14 @@ class PoolThread(threading.Thread): def run(self): try: return self.body() - except RestartFreqExceeded, exc: + except RestartFreqExceeded as exc: error("Thread %r crashed: %r", type(self).__name__, exc, - exc_info=True) + exc_info=1) _kill(os.getpid(), signal.SIGTERM) sys.exit() - except Exception, exc: + except Exception as exc: error("Thread %r crashed: %r", type(self).__name__, exc, - exc_info=True) + exc_info=1) os._exit(1) def start(self, *args, **kwargs): @@ -375,7 +507,7 @@ class
Supervisor(PoolThread): # the max restart frequency. prev_state = pool.restart_state pool.restart_state = restart_state(10 * pool._processes, 1) - for _ in xrange(10): + for _ in range(10): if self._state == RUN and pool._state == RUN: pool._maintain_pool() time.sleep(0.1) @@ -424,8 +556,8 @@ class TaskHandler(PoolThread): set_length(i + 1) continue break - except Exception, exc: - print("Task Handler ERROR: %r" % (exc, )) + except Exception as exc: + error('Task Handler ERROR: %r', exc, exc_info=1) break else: debug('task handler got sentinel') @@ -466,10 +598,10 @@ class TimeoutHandler(PoolThread): super(TimeoutHandler, self).__init__() def _process_by_pid(self, pid): - for index, process in enumerate(self.processes): - if process.pid == pid: - return process, index - return None, None + return next(( + (proc, i) for i, proc in enumerate(self.processes) + if proc.pid == pid + ), (None, None)) def on_soft_timeout(self, job): debug('soft time limit exceeded for %r', job) @@ -483,7 +615,7 @@ class TimeoutHandler(PoolThread): try: _kill(job._worker_pid, SIG_SOFT_TIMEOUT) - except OSError, exc: + except OSError as exc: if get_errno(exc) != errno.ESRCH: raise @@ -533,7 +665,7 @@ class TimeoutHandler(PoolThread): def _timed_out(start, timeout): if not start or not timeout: return False - if time.time() >= start + timeout: + if monotonic() >= start + timeout: return True # Inner-loop @@ -543,7 +675,7 @@ class TimeoutHandler(PoolThread): if dirty: dirty = set(k for k in dirty if k in cache) - for i, job in cache.items(): + for i, job in list(cache.items()): ack_time = job._time_accepted soft_timeout = job._soft_timeout if soft_timeout is None: @@ -571,7 +703,7 @@ class TimeoutHandler(PoolThread): if self._it is None: self._it = self.handle_timeouts() try: - self._it.next() + next(self._it) except StopIteration: self._it = None @@ -579,7 +711,8 @@ class TimeoutHandler(PoolThread): class ResultHandler(PoolThread): def __init__(self, outqueue, get, cache, poll, - join_exited_workers, putlock, restart_state, check_timeouts): + join_exited_workers, putlock, restart_state, + check_timeouts, on_job_ready): self.outqueue = outqueue self.get = get self.cache = cache @@ -590,27 +723,31 @@ class ResultHandler(PoolThread): self._it = None self._shutdown_complete = False self.check_timeouts = check_timeouts + self.on_job_ready = on_job_ready + self._make_methods() super(ResultHandler, self).__init__() def on_stop_not_started(self): # used when pool started without result handler thread. self.finish_at_shutdown(handle_timeouts=True) - def _process_result(self, timeout=1.0): + def _make_methods(self): cache = self.cache - poll = self.poll putlock = self.putlock restart_state = self.restart_state + on_job_ready = self.on_job_ready - def on_ack(job, i, time_accepted, pid): + def on_ack(job, i, time_accepted, pid, synqW_fd): + restart_state.R = 0 try: - cache[job]._ack(i, time_accepted, pid) + cache[job]._ack(i, time_accepted, pid, synqW_fd) except (KeyError, AttributeError): # Object gone or doesn't support _ack (e.g. IMAPIterator). 
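The on_ack/on_ready handlers around this point consume the (state, args) messages that Worker.workloop() above puts on the outqueue. A minimal standalone dispatcher in the same shape (with hypothetical handlers; the real state_handlers mapping follows below) might look like:

def dispatch(message, handlers):
    # unpack one (state, args) message and route it to its handler
    state, args = message
    try:
        handler = handlers[state]
    except KeyError:
        raise ValueError('unknown job state: %r' % (state,))
    return handler(*args)

seen = []
demo_handlers = {
    ACK: lambda job, i, t, pid, synqW_fd: seen.append(('ack', job)),
    READY: lambda job, i, result, inqW_fd: seen.append(('ready', job)),
}
dispatch((ACK, (7, 0, 0.0, 1234, None)), demo_handlers)
dispatch((READY, (7, 0, (True, 42), None)), demo_handlers)
assert seen == [('ack', 7), ('ready', 7)]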
pass - def on_ready(job, i, obj): - restart_state.R = 0 + def on_ready(job, i, obj, inqW_fd): + if on_job_ready is not None: + on_job_ready(job, i, obj, inqW_fd) try: item = cache[job] except KeyError: @@ -623,7 +760,9 @@ class ResultHandler(PoolThread): except KeyError: pass - state_handlers = {ACK: on_ack, READY: on_ready} + state_handlers = self.state_handlers = { + ACK: on_ack, READY: on_ready, + } def on_state_change(task): state, args = task @@ -631,11 +770,16 @@ class ResultHandler(PoolThread): state_handlers[state](*args) except KeyError: debug("Unknown job state: %s (args=%s)", state, args) + self.on_state_change = on_state_change + + def _process_result(self, timeout=1.0): + poll = self.poll + on_state_change = self.on_state_change while 1: try: ready, task = poll(timeout) - except (IOError, EOFError), exc: + except (IOError, EOFError) as exc: debug('result handler got %r -- exiting', exc) raise CoroStop() @@ -653,15 +797,14 @@ class ResultHandler(PoolThread): break else: break + yield - yield - - def handle_event(self, *args): + def handle_event(self, fileno=None, events=None): if self._state == RUN: if self._it is None: self._it = self._process_result(0) # non-blocking try: - self._it.next() + next(self._it) except (StopIteration, CoroStop): self._it = None @@ -684,39 +827,8 @@ class ResultHandler(PoolThread): cache = self.cache poll = self.poll join_exited_workers = self.join_exited_workers - putlock = self.putlock - restart_state = self.restart_state check_timeouts = self.check_timeouts - - def on_ack(job, i, time_accepted, pid): - try: - cache[job]._ack(i, time_accepted, pid) - except (KeyError, AttributeError): - # Object gone or doesn't support _ack (e.g. IMAPIterator). - pass - - def on_ready(job, i, obj): - restart_state.R = 0 - try: - item = cache[job] - except KeyError: - return - if not item.ready(): - if putlock is not None: - putlock.release() - try: - item._set(i, obj) - except KeyError: - pass - - state_handlers = {ACK: on_ack, READY: on_ready} - - def on_state_change(task): - state, args = task - try: - state_handlers[state](*args) - except KeyError: - debug("Unknown job state: %s (args=%s)", state, args) + on_state_change = self.on_state_change time_terminate = None while cache and self._state != TERMINATE: @@ -724,7 +836,7 @@ class ResultHandler(PoolThread): check_timeouts() try: ready, task = poll(1.0) - except (IOError, EOFError), exc: + except (IOError, EOFError) as exc: debug('result handler got %r -- exiting', exc) return @@ -737,7 +849,7 @@ class ResultHandler(PoolThread): try: join_exited_workers(shutdown=True) except WorkersJoined: - now = time.time() + now = monotonic() if not time_terminate: time_terminate = now else: @@ -769,7 +881,7 @@ class Pool(object): ''' Class which supports an async version of applying functions to arguments. 
''' - Process = Process + Worker = Worker Supervisor = Supervisor TaskHandler = TaskHandler TimeoutHandler = TimeoutHandler @@ -778,7 +890,7 @@ class Pool(object): def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, timeout=None, soft_timeout=None, - lost_worker_timeout=LOST_WORKER_TIMEOUT, + lost_worker_timeout=None, max_restarts=None, max_restart_freq=1, on_process_up=None, on_process_down=None, @@ -787,9 +899,13 @@ class Pool(object): threads=True, semaphore=None, putlocks=False, - allow_restart=False): + allow_restart=False, + synack=False, + on_process_exit=None, + **kwargs): + self.synack = synack self._setup_queues() - self._taskqueue = Queue.Queue() + self._taskqueue = Queue() self._cache = {} self._state = RUN self.timeout = timeout @@ -797,6 +913,7 @@ class Pool(object): self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs + self._on_process_exit = on_process_exit self.lost_worker_timeout = lost_worker_timeout or LOST_WORKER_TIMEOUT self.on_process_up = on_process_up self.on_process_down = on_process_down @@ -805,9 +922,6 @@ class Pool(object): self.threads = threads self.readers = {} self.allow_restart = allow_restart - # Contains processes that we have terminated, - # and that the supervisor should not raise an error for. - self.signalled = set() if soft_timeout and SIG_SOFT_TIMEOUT is None: warnings.warn(UserWarning( @@ -816,32 +930,28 @@ class Pool(object): )) soft_timeout = None - if processes is None: - try: - processes = cpu_count() - except NotImplementedError: - processes = 1 - self._processes = processes - self.max_restarts = max_restarts or round(processes * 100) + self._processes = self.cpu_count() if processes is None else processes + self.max_restarts = max_restarts or round(self._processes * 100) self.restart_state = restart_state(max_restarts, max_restart_freq or 1) - if initializer is not None and not callable(initializer): + if initializer is not None and \ + not isinstance(initializer, Callable): raise TypeError('initializer must be a callable') + if on_process_exit is not None and \ + not isinstance(on_process_exit, Callable): + raise TypeError('on_process_exit must be callable') + self._pool = [] self._poolctrl = {} self.putlocks = putlocks self._putlock = semaphore or LaxBoundedSemaphore(self._processes) - for i in range(processes): + for i in range(self._processes): self._create_worker_process(i) self._worker_handler = self.Supervisor(self) if threads: self._worker_handler.start() - else: - self.readers.update( - dict((w._popen.sentinel, self.maintain_pool) - for w in self._pool)) self._task_handler = self.TaskHandler(self._taskqueue, self._quick_put, @@ -855,60 +965,90 @@ class Pool(object): self._pool, self._cache, self.soft_timeout, self.timeout, ) - self._timeout_handler_mutex = threading.Lock() + self._timeout_handler_mutex = Lock() self._timeout_handler_started = False if self.timeout is not None or self.soft_timeout is not None: self._start_timeout_handler() # If running without threads, we need to check for timeouts # while waiting for unfinished work at shutdown. - check_timeouts = None + self.check_timeouts = None if not threads: - check_timeouts = self._timeout_handler.handle_event + self.check_timeouts = self._timeout_handler.handle_event # Thread processing results in the outqueue. 
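An end-to-end usage sketch for this Pool, assuming the billiard-specific keyword arguments shown above (accept_callback, soft_timeout) on top of the stock multiprocessing.Pool API; note that callbacks run in the pool's handler threads, not in the worker process.

from billiard.pool import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    events = []
    pool = Pool(processes=2)
    res = pool.apply_async(
        square, (7,),
        callback=lambda value: events.append(('ready', value)),
        accept_callback=lambda pid, t: events.append(('accepted', pid)),
        soft_timeout=30, timeout=60,
    )
    assert res.get(timeout=10) == 49
    pool.close()
    pool.join()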
- self._result_handler = self.ResultHandler( - self._outqueue, self._quick_get, self._cache, - self._poll_result, self._join_exited_workers, - self._putlock, self.restart_state, check_timeouts, - ) + self._result_handler = self.create_result_handler() + self.handle_result_event = self._result_handler.handle_event if threads: self._result_handler.start() - else: - self.readers[self._outqueue._reader] = \ - self._result_handler.handle_event self._terminate = Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._worker_handler, self._task_handler, self._result_handler, self._cache, - self._timeout_handler), + self._timeout_handler, + self._help_stuff_finish_args()), exitpriority=15, ) + def create_result_handler(self, **extra_kwargs): + return self.ResultHandler( + self._outqueue, self._quick_get, self._cache, + self._poll_result, self._join_exited_workers, + self._putlock, self.restart_state, self.check_timeouts, + self.on_job_ready, **extra_kwargs + ) + + def on_job_ready(self, job, i, obj, inqW_fd): + pass + + def _help_stuff_finish_args(self): + return self._inqueue, self._task_handler, self._pool + + def cpu_count(self): + try: + return cpu_count() + except NotImplementedError: + return 1 + + def handle_result_event(self, *args): + return self._result_handler.handle_event(*args) + + def _process_register_queues(self, worker, queues): + pass + + def _process_by_pid(self, pid): + return next(( + (proc, i) for i, proc in enumerate(self._pool) + if proc.pid == pid + ), (None, None)) + + def get_process_queues(self): + return self._inqueue, self._outqueue, None + def _create_worker_process(self, i): sentinel = Event() if self.allow_restart else None - w = self.Process( - target=worker, - args=( - self._inqueue, self._outqueue, - self._initializer, self._initargs, - self._maxtasksperchild, - sentinel - ), + inq, outq, synq = self.get_process_queues() + w = self.Worker( + inq, outq, synq, self._initializer, self._initargs, + self._maxtasksperchild, sentinel, self._on_process_exit, ) self._pool.append(w) + self._process_register_queues(w, (inq, outq, synq)) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.index = i w.start() + self._poolctrl[w.pid] = sentinel if self.on_process_up: self.on_process_up(w) - self._poolctrl[w.pid] = sentinel return w + def process_flush_queues(self, worker): + pass + def _join_exited_workers(self, shutdown=False): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were @@ -919,20 +1059,12 @@ class Pool(object): # but we have no way to accurately tell if it did. So we wait for # _lost_worker_timeout seconds before we mark the job with # WorkerLostError. - for job in [job for job in self._cache.values() + for job in [job for job in list(self._cache.values()) if not job.ready() and job._worker_lost]: - now = now or time.time() + now = now or monotonic() lost_time, lost_ret = job._worker_lost if now - lost_time > job._lost_worker_timeout: - try: - raise WorkerLostError( - "Worker exited prematurely (exitcode: %r)." 
% ( - lost_ret, )) - except WorkerLostError: - exc_info = ExceptionInfo() - job._set(None, (False, exc_info)) - else: # pragma: no cover - pass + self.mark_as_worker_lost(job, lost_ret) if shutdown and not len(self._pool): raise WorkersJoined() @@ -947,52 +1079,106 @@ class Pool(object): debug('Supervisor: worked %d joined', i) cleaned[worker.pid] = worker exitcodes[worker.pid] = worker.exitcode - if worker.exitcode not in (EX_OK, EX_RECYCLE): - error('Process %r pid:%r exited with exitcode %r' % ( - worker.name, worker.pid, worker.exitcode)) + if worker.exitcode not in (EX_OK, EX_RECYCLE) and \ + not getattr(worker, '_controlled_termination', False): + error( + 'Process %r pid:%r exited with exitcode %r', + worker.name, worker.pid, worker.exitcode, exc_info=0, + ) + self.process_flush_queues(worker) del self._pool[i] del self._poolctrl[worker.pid] if cleaned: - for job in self._cache.values(): - for worker_pid in job.worker_pids(): - if worker_pid in cleaned and not job.ready(): - if worker_pid in self.signalled: - try: - raise Terminated(-exitcodes[worker_pid]) - except Terminated: - job._set(None, (False, ExceptionInfo())) + all_pids = [w.pid for w in self._pool] + for job in list(self._cache.values()): + acked_by_gone = next( + (pid for pid in job.worker_pids() + if pid in cleaned or pid not in all_pids), + None + ) + # already accepted by process + if acked_by_gone: + self.on_job_process_down(job, acked_by_gone) + if not job.ready(): + exitcode = exitcodes[acked_by_gone] + if getattr(cleaned[acked_by_gone], + '_job_terminated', False): + job._set_terminated(exitcode) else: - job._worker_lost = (time.time(), - exitcodes[worker_pid]) - break - for worker in cleaned.itervalues(): + self.on_job_process_lost( + job, acked_by_gone, exitcode, + ) + else: + # started writing to + write_to = job._write_to + # was scheduled to write to + sched_for = job._scheduled_for + + if write_to and write_to.exitcode is not None: + self.on_job_process_down(job, write_to.pid) + elif sched_for and sched_for.exitcode is not None: + self.on_job_process_down(job, sched_for.pid) + + for worker in values(cleaned): if self.on_process_down: + if not shutdown: + self._process_cleanup_queues(worker) self.on_process_down(worker) - return exitcodes.values() + return list(exitcodes.values()) return [] + def on_partial_read(self, job, worker): + pass + + def _process_cleanup_queues(self, worker): + pass + + def on_job_process_down(self, job, pid_gone): + pass + + def on_job_process_lost(self, job, pid, exitcode): + job._worker_lost = (monotonic(), exitcode) + + def mark_as_worker_lost(self, job, exitcode): + try: + raise WorkerLostError( + 'Worker exited prematurely: {0}.'.format( + human_status(exitcode)), + ) + except WorkerLostError: + job._set(None, (False, ExceptionInfo())) + else: # pragma: no cover + pass + def __enter__(self): return self def __exit__(self, *exc_info): return self.terminate() + def on_grow(self, n): + pass + + def on_shrink(self, n): + pass + def shrink(self, n=1): for i, worker in enumerate(self._iterinactive()): self._processes -= 1 if self._putlock: self._putlock.shrink() - worker.terminate() + worker.terminate_controlled() + self.on_shrink(1) if i == n - 1: return - raise ValueError("Can't shrink pool. All processes busy!") + raise ValueError("Can't shrink pool. 
All processes busy!") def grow(self, n=1): - for i in xrange(n): - #assert len(self._pool) == self._processes + for i in range(n): self._processes += 1 if self._putlock: self._putlock.grow() + self.on_grow(n) def _iterinactive(self): for worker in self._pool: @@ -1001,7 +1187,7 @@ class Pool(object): raise StopIteration() def _worker_active(self, worker): - for job in self._cache.values(): + for job in values(self._cache): if worker.pid in job.worker_pids(): return True return False @@ -1038,7 +1224,7 @@ class Pool(object): if self._putlock is not None: self._putlock.release() - def maintain_pool(self, *args, **kwargs): + def maintain_pool(self): if self._worker_handler._state == RUN and self._state == RUN: try: self._maintain_pool() @@ -1046,6 +1232,12 @@ class Pool(object): self.close() self.join() raise + except OSError as exc: + if get_errno(exc) == errno.ENOMEM: + reraise(MemoryError, + MemoryError(str(exc)), + sys.exc_info()[2]) + raise def _setup_queues(self): from billiard.queues import SimpleQueue @@ -1114,7 +1306,7 @@ class Pool(object): result = IMapIterator(self._cache, lost_worker_timeout=lost_worker_timeout) self._taskqueue.put(( - ((result._job, i, func, (x,), {}) + ((TASK, (result._job, i, func, (x,), {})) for i, x in enumerate(iterable)), result._set_length, )) @@ -1125,7 +1317,7 @@ class Pool(object): result = IMapIterator(self._cache, lost_worker_timeout=lost_worker_timeout) self._taskqueue.put(( - ((result._job, i, mapstar, (x,), {}) + ((TASK, (result._job, i, mapstar, (x,), {})) for i, x in enumerate(task_batches)), result._set_length, )) @@ -1144,7 +1336,7 @@ class Pool(object): self._cache, lost_worker_timeout=lost_worker_timeout, ) self._taskqueue.put(( - ((result._job, i, func, (x,), {}) + ((TASK, (result._job, i, func, (x,), {})) for i, x in enumerate(iterable)), result._set_length, )) @@ -1156,7 +1348,7 @@ class Pool(object): self._cache, lost_worker_timeout=lost_worker_timeout, ) self._taskqueue.put(( - ((result._job, i, mapstar, (x,), {}) + ((TASK, (result._job, i, mapstar, (x,), {})) for i, x in enumerate(task_batches)), result._set_length, )) @@ -1166,7 +1358,8 @@ class Pool(object): callback=None, error_callback=None, accept_callback=None, timeout_callback=None, waitforslot=None, soft_timeout=None, timeout=None, lost_worker_timeout=None, - callbacks_propagate=()): + callbacks_propagate=(), + correlation_id=None): ''' Asynchronous equivalent of `apply()` method. @@ -1175,11 +1368,12 @@ class Pool(object): Simplified the flow is like this: - >>> if accept_callback: - ... accept_callback() - >>> retval = func(*args, **kwds) - >>> if callback: - ... callback(retval) + >>> def apply_async(func, args, kwds, callback, accept_callback): + ... if accept_callback: + ... accept_callback() + ... retval = func(*args, **kwds) + ... if callback: + ... 
callback(retval) ''' if self._state != RUN: @@ -1193,36 +1387,43 @@ class Pool(object): "on this platform: It does not have the SIGUSR1 signal.", )) soft_timeout = None - if waitforslot is None: - waitforslot = self.putlocks - if waitforslot and self._putlock is not None and self._state == RUN: - self._putlock.acquire() if self._state == RUN: + waitforslot = self.putlocks if waitforslot is None else waitforslot + if waitforslot and self._putlock is not None: + self._putlock.acquire() result = ApplyResult( self._cache, callback, accept_callback, timeout_callback, error_callback, soft_timeout, timeout, lost_worker_timeout, on_timeout_set=self.on_timeout_set, on_timeout_cancel=self.on_timeout_cancel, callbacks_propagate=callbacks_propagate, + send_ack=self.send_ack if self.synack else None, + correlation_id=correlation_id, ) if timeout or soft_timeout: # start the timeout handler thread when required. self._start_timeout_handler() if self.threads: - self._taskqueue.put(([(result._job, None, - func, args, kwds)], None)) + self._taskqueue.put(([(TASK, (result._job, None, + func, args, kwds))], None)) else: - self._quick_put((result._job, None, func, args, kwds)) + self._quick_put((TASK, (result._job, None, func, args, kwds))) return result + def send_ack(self, response, job, i, fd): + pass + def terminate_job(self, pid, sig=None): - try: - _kill(pid, sig or signal.SIGTERM) - except OSError, exc: - if get_errno(exc) != errno.ESRCH: - raise - else: - self.signalled.add(pid) + proc, _ = self._process_by_pid(pid) + if proc is not None: + try: + _kill(pid, sig or signal.SIGTERM) + except OSError as exc: + if get_errno(exc) != errno.ESRCH: + raise + else: + proc._controlled_termination = True + proc._job_terminated = True def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): @@ -1253,7 +1454,7 @@ class Pool(object): task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self._cache, chunksize, len(iterable), callback, error_callback=error_callback) - self._taskqueue.put((((result._job, i, mapper, (x,), {}) + self._taskqueue.put((((TASK, (result._job, i, mapper, (x,), {})) for i, x in enumerate(task_batches)), None)) return result @@ -1287,25 +1488,31 @@ class Pool(object): self._worker_handler.terminate() self._terminate() + @staticmethod + def _stop_task_handler(task_handler): + stop_if_not_current(task_handler) + def join(self): assert self._state in (CLOSE, TERMINATE) debug('joining worker handler') stop_if_not_current(self._worker_handler) debug('joining task handler') - stop_if_not_current(self._task_handler) + self._stop_task_handler(self._task_handler) debug('joining result handler') stop_if_not_current(self._result_handler) debug('result handler joined') for i, p in enumerate(self._pool): - debug('joining worker %s/%s (%r)', i, len(self._pool), p) - p.join() + debug('joining worker %s/%s (%r)', i+1, len(self._pool), p) + if p._popen is not None: # process started? 
+ p.join() + debug('pool join complete') def restart(self): - for e in self._poolctrl.itervalues(): + for e in values(self._poolctrl): e.set() @staticmethod - def _help_stuff_finish(inqueue, task_handler, size): + def _help_stuff_finish(inqueue, task_handler, _pool): # task_handler may be blocked trying to put items on inqueue debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() @@ -1313,10 +1520,15 @@ class Pool(object): inqueue._reader.recv() time.sleep(0) + @classmethod + def _set_result_sentinel(cls, outqueue, pool): + outqueue.put(None) + @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, worker_handler, task_handler, - result_handler, cache, timeout_handler): + result_handler, cache, timeout_handler, + help_stuff_finish_args): # this is guaranteed to only be called once debug('finalizing pool') @@ -1327,10 +1539,10 @@ taskqueue.put(None) # sentinel debug('helping task handler/workers to finish') - cls._help_stuff_finish(inqueue, task_handler, len(pool)) + cls._help_stuff_finish(*help_stuff_finish_args) result_handler.terminate() - outqueue.put(None) # sentinel + cls._set_result_sentinel(outqueue, pool) if timeout_handler is not None: timeout_handler.terminate() @@ -1339,11 +1551,11 @@ if pool and hasattr(pool[0], 'terminate'): debug('terminating workers') for p in pool: - if p.exitcode is None: + if p._popen is not None and p.exitcode is None: p.terminate() debug('joining task handler') - task_handler.stop() + cls._stop_task_handler(task_handler) debug('joining result handler') result_handler.stop() @@ -1360,7 +1572,10 @@ debug('cleaning up worker %d', p.pid) p.join() debug('pool workers joined') -DynamicPool = Pool + + @property + def process_sentinels(self): + return [w._popen.sentinel for w in self._pool] # # Class whose instances are returned by `Pool.apply_async()` @@ -1369,32 +1584,44 @@ class ApplyResult(object): _worker_lost = None + _write_to = None + _scheduled_for = None def __init__(self, cache, callback, accept_callback=None, timeout_callback=None, error_callback=None, soft_timeout=None, timeout=None, lost_worker_timeout=LOST_WORKER_TIMEOUT, on_timeout_set=None, on_timeout_cancel=None, - callbacks_propagate=()): - self._mutex = threading.Lock() + callbacks_propagate=(), send_ack=None, + correlation_id=None): + self.correlation_id = correlation_id + self._mutex = Lock() self._event = threading.Event() - self._job = job_counter.next() + self._job = next(job_counter) self._cache = cache self._callback = callback self._accept_callback = accept_callback self._error_callback = error_callback self._timeout_callback = timeout_callback self._timeout = timeout + self._terminated = None self._soft_timeout = soft_timeout self._lost_worker_timeout = lost_worker_timeout self._on_timeout_set = on_timeout_set self._on_timeout_cancel = on_timeout_cancel self._callbacks_propagate = callbacks_propagate or () + self._send_ack = send_ack self._accepted = False + self._cancelled = False self._worker_pid = None self._time_accepted = None cache[self._job] = self + def __repr__(self): + return '<Result: {id} ack:{ack} ready:{ready}>'.format( + id=self._job, ack=self._accepted, ready=self.ready(), + ) + def ready(self): return self._event.isSet() @@ -1405,8 +1632,24 @@ assert self.ready() return self._success + def _cancel(self): + """Only works if synack is used.""" + self._cancelled = True + + def discard(self): + self._cache.pop(self._job, None) + + def 
terminate(self, signum): + self._terminated = signum + + def _set_terminated(self, signum=0): + try: + raise Terminated(-signum) + except Terminated: + self._set(None, (False, ExceptionInfo())) + def worker_pids(self): - return filter(None, [self._worker_pid]) + return [self._worker_pid] if self._worker_pid else [] def wait(self, timeout=None): self._event.wait(timeout) @@ -1426,9 +1669,9 @@ class ApplyResult(object): fun(*args) except self._callbacks_propagate: raise - except Exception, exc: - error("Pool callback raised exception: %r", exc, - exc_info=True) + except Exception as exc: + error('Pool callback raised exception: %r', exc, + exc_info=1) def _set(self, i, obj): with self._mutex: @@ -1437,6 +1680,9 @@ class ApplyResult(object): self._success, self._value = obj self._event.set() if self._accepted: + # if not accepted yet, then the set message + # was received before the ack, which means + # the ack will remove the entry. self._cache.pop(self._job, None) # apply callbacks last @@ -1448,18 +1694,38 @@ class ApplyResult(object): self.safe_apply_callback( self._error_callback, self._value) - def _ack(self, i, time_accepted, pid): + def _ack(self, i, time_accepted, pid, synqW_fd): with self._mutex: + if self._cancelled and self._send_ack: + self._accepted = True + if synqW_fd: + return self._send_ack(NACK, pid, self._job, synqW_fd) + return self._accepted = True self._time_accepted = time_accepted self._worker_pid = pid if self.ready(): + # ack received after set() self._cache.pop(self._job, None) if self._on_timeout_set: self._on_timeout_set(self, self._soft_timeout, self._timeout) + response = ACK if self._accept_callback: - self.safe_apply_callback( - self._accept_callback, pid, time_accepted) + try: + self._accept_callback(pid, time_accepted) + except self._propagate_errors: + response = NACK + raise + except Exception: + response = NACK + # ignore other errors + finally: + if self._send_ack and synqW_fd: + return self._send_ack( + response, pid, self._job, synqW_fd + ) + if self._send_ack and synqW_fd: + self._send_ack(response, pid, self._job, synqW_fd) # # Class whose instances are returned by `Pool.map_async()` @@ -1520,7 +1786,7 @@ class MapResult(ApplyResult): return all(self._accepted) def worker_pids(self): - return filter(None, self._worker_pid) + return [pid for pid in self._worker_pid if pid] # # Class whose instances are returned by `Pool.imap()` @@ -1532,9 +1798,9 @@ class IMapIterator(object): def __init__(self, cache, lost_worker_timeout=LOST_WORKER_TIMEOUT): self._cond = threading.Condition(threading.Lock()) - self._job = job_counter.next() + self._job = next(job_counter) self._cache = cache - self._items = collections.deque() + self._items = deque() self._index = 0 self._length = None self._ready = False @@ -1634,22 +1900,22 @@ class ThreadPool(Pool): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): - self._inqueue = Queue.Queue() - self._outqueue = Queue.Queue() + self._inqueue = Queue() + self._outqueue = Queue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get def _poll_result(timeout): try: return True, self._quick_get(timeout=timeout) - except Queue.Empty: + except Empty: return False, None self._poll_result = _poll_result @staticmethod - def _help_stuff_finish(inqueue, task_handler, size): + def _help_stuff_finish(inqueue, task_handler, pool): # put sentinels at head of inqueue to make workers finish with inqueue.not_empty: inqueue.queue.clear() - inqueue.queue.extend([None] * size) + 
inqueue.queue.extend([None] * len(pool)) inqueue.not_empty.notify_all() diff --git a/awx/lib/site-packages/billiard/process.py b/awx/lib/site-packages/billiard/process.py index b8418229db..8369072b99 100644 --- a/awx/lib/site-packages/billiard/process.py +++ b/awx/lib/site-packages/billiard/process.py @@ -27,6 +27,7 @@ try: from _weakrefset import WeakSet except ImportError: WeakSet = None # noqa +from .five import items, string_t try: ORIGINAL_DIR = os.path.abspath(os.getcwd()) @@ -85,7 +86,7 @@ class Process(object): def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, daemon=None, **_kw): assert group is None, 'group argument must be None for now' - count = _current_process._counter.next() + count = next(_current_process._counter) self._identity = _current_process._identity + (count,) self._authkey = _current_process._authkey if daemon is not None: @@ -164,7 +165,7 @@ class Process(object): return self._name def _set_name(self, value): - assert isinstance(name, basestring), 'name must be a string' + assert isinstance(name, string_t), 'name must be a string' self._name = value name = property(_get_name, _set_name) @@ -256,14 +257,17 @@ class Process(object): _current_process = self # Re-init logging system. - # Workaround for http://bugs.python.org/issue6721#msg140215 - # Python logging module uses RLock() objects which are broken after - # fork. This can result in a deadlock (Celery Issue #496). - logger_names = logging.Logger.manager.loggerDict.keys() + # Workaround for http://bugs.python.org/issue6721/#msg140215 + # Python logging module uses RLock() objects which are broken + # after fork. This can result in a deadlock (Celery Issue #496). + loggerDict = logging.Logger.manager.loggerDict + logger_names = list(loggerDict.keys()) logger_names.append(None) # for root logger for name in logger_names: - for handler in logging.getLogger(name).handlers: - handler.createLock() + if not name or not isinstance(loggerDict[name], + logging.PlaceHolder): + for handler in logging.getLogger(name).handlers: + handler.createLock() logging._lock = threading.RLock() try: @@ -279,15 +283,15 @@ class Process(object): exitcode = 0 finally: util._exit_function() - except SystemExit, e: - if not e.args: + except SystemExit as exc: + if not exc.args: exitcode = 1 - elif isinstance(e.args[0], int): - exitcode = e.args[0] + elif isinstance(exc.args[0], int): + exitcode = exc.args[0] else: - sys.stderr.write(str(e.args[0]) + '\n') + sys.stderr.write(str(exc.args[0]) + '\n') _maybe_flush(sys.stderr) - exitcode = 0 if isinstance(e.args[0], str) else 1 + exitcode = 0 if isinstance(exc.args[0], str) else 1 except: exitcode = 1 if not util.error('Process %s', self.name, exc_info=True): @@ -347,7 +351,7 @@ del _MainProcess _exitcode_to_name = {} -for name, signum in signal.__dict__.items(): +for name, signum in items(signal.__dict__): if name[:3] == 'SIG' and '_' not in name: _exitcode_to_name[-signum] = name diff --git a/awx/lib/site-packages/billiard/queues.py b/awx/lib/site-packages/billiard/queues.py index 2554a5ef07..c3d15ff6d7 100644 --- a/awx/lib/site-packages/billiard/queues.py +++ b/awx/lib/site-packages/billiard/queues.py @@ -7,7 +7,6 @@ # Licensed to PSF under a Contributor Agreement. # from __future__ import absolute_import -from __future__ import with_statement __all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] @@ -15,17 +14,16 @@ import sys import os import threading import collections -import time import weakref import errno -from Queue import Empty, Full - from . 
import Pipe from ._ext import _billiard from .compat import get_errno +from .five import monotonic from .synchronize import Lock, BoundedSemaphore, Semaphore, Condition from .util import debug, error, info, Finalize, register_after_fork +from .five import Empty, Full from .forking import assert_spawning @@ -96,12 +94,12 @@ class Queue(object): else: if block: - deadline = time.time() + timeout + deadline = monotonic() + timeout if not self._rlock.acquire(block, timeout): raise Empty try: if block: - timeout = deadline - time.time() + timeout = deadline - monotonic() if timeout < 0 or not self._poll(timeout): raise Empty elif not self._poll(): @@ -238,7 +236,7 @@ class Queue(object): send(obj) except IndexError: pass - except Exception, exc: + except Exception as exc: if ignore_epipe and get_errno(exc) == errno.EPIPE: return # Since this runs in a daemon thread the resources it uses @@ -306,19 +304,17 @@ class JoinableQueue(Queue): self._cond.wait() -class SimpleQueue(object): +class _SimpleQueue(object): ''' Simplified Queue type -- really just a locked pipe ''' - def __init__(self): - self._reader, self._writer = Pipe(duplex=False) - self._rlock = Lock() + def __init__(self, rnonblock=False, wnonblock=False): + self._reader, self._writer = Pipe( + duplex=False, rnonblock=rnonblock, wnonblock=wnonblock, + ) self._poll = self._reader.poll - if sys.platform == 'win32': - self._wlock = None - else: - self._wlock = Lock() + self._rlock = self._wlock = None self._make_methods() def empty(self): @@ -337,19 +333,22 @@ class SimpleQueue(object): try: recv_payload = self._reader.recv_payload except AttributeError: - recv_payload = None # C extension not installed + recv_payload = self._reader.recv_bytes rlock = self._rlock - def get(): - with rlock: - return recv() - self.get = get + if rlock is not None: + def get(): + with rlock: + return recv() + self.get = get - if recv_payload is not None: def get_payload(): with rlock: return recv_payload() self.get_payload = get_payload + else: + self.get = recv + self.get_payload = recv_payload if self._wlock is None: # writes to a message oriented win32 pipe are atomic @@ -362,3 +361,12 @@ class SimpleQueue(object): with wlock: return send(obj) self.put = put + + +class SimpleQueue(_SimpleQueue): + + def __init__(self): + self._reader, self._writer = Pipe(duplex=False) + self._rlock = Lock() + self._wlock = Lock() if sys.platform != 'win32' else None + self._make_methods() diff --git a/awx/lib/site-packages/billiard/reduction.py b/awx/lib/site-packages/billiard/reduction.py index c04a90db0d..20d942dd4e 100644 --- a/awx/lib/site-packages/billiard/reduction.py +++ b/awx/lib/site-packages/billiard/reduction.py @@ -1,200 +1,10 @@ -# -# Module to allow connection and socket objects to be transferred -# between processes -# -# multiprocessing/reduction.py -# -# Copyright (c) 2006-2008, R Oudkerk -# Licensed to PSF under a Contributor Agreement. -# - from __future__ import absolute_import -__all__ = [] - -import os import sys -import socket -import threading if sys.version_info[0] == 3: - from multiprocessing.connection import Client, Listener + from . import _reduction3 as reduction else: - from billiard._connection import Client, Listener # noqa + from . import _reduction as reduction # noqa -from . 
import current_process -from ._ext import _billiard, win32 -from .forking import Popen, duplicate, close, ForkingPickler -from .util import register_after_fork, debug, sub_debug - -if not(sys.platform == 'win32' or hasattr(_billiard, 'recvfd')): - raise ImportError('pickling of connections not supported') - - -# globals set later -_listener = None -_lock = None -_cache = set() - -# -# Platform specific definitions -# - -if sys.platform == 'win32': - # XXX Should this subprocess import be here? - import _subprocess # noqa - - def send_handle(conn, handle, destination_pid): - process_handle = win32.OpenProcess( - win32.PROCESS_ALL_ACCESS, False, destination_pid - ) - try: - new_handle = duplicate(handle, process_handle) - conn.send(new_handle) - finally: - close(process_handle) - - def recv_handle(conn): - return conn.recv() - -else: - def send_handle(conn, handle, destination_pid): # noqa - _billiard.sendfd(conn.fileno(), handle) - - def recv_handle(conn): # noqa - return _billiard.recvfd(conn.fileno()) - -# -# Support for a per-process server thread which caches pickled handles -# - - -def _reset(obj): - global _lock, _listener, _cache - for h in _cache: - close(h) - _cache.clear() - _lock = threading.Lock() - _listener = None - -_reset(None) -register_after_fork(_reset, _reset) - - -def _get_listener(): - global _listener - - if _listener is None: - _lock.acquire() - try: - if _listener is None: - debug('starting listener and thread for sending handles') - _listener = Listener(authkey=current_process().authkey) - t = threading.Thread(target=_serve) - t.daemon = True - t.start() - finally: - _lock.release() - - return _listener - - -def _serve(): - from .util import is_exiting, sub_warning - - while 1: - try: - conn = _listener.accept() - handle_wanted, destination_pid = conn.recv() - _cache.remove(handle_wanted) - send_handle(conn, handle_wanted, destination_pid) - close(handle_wanted) - conn.close() - except: - if not is_exiting(): - sub_warning('thread for sharing handles raised exception', - exc_info=True) - -# -# Functions to be used for pickling/unpickling objects with handles -# - - -def reduce_handle(handle): - if Popen.thread_is_spawning(): - return (None, Popen.duplicate_for_child(handle), True) - dup_handle = duplicate(handle) - _cache.add(dup_handle) - sub_debug('reducing handle %d', handle) - return (_get_listener().address, dup_handle, False) - - -def rebuild_handle(pickled_data): - address, handle, inherited = pickled_data - if inherited: - return handle - sub_debug('rebuilding handle %d', handle) - conn = Client(address, authkey=current_process().authkey) - conn.send((handle, os.getpid())) - new_handle = recv_handle(conn) - conn.close() - return new_handle - -# -# Register `_billiard.Connection` with `ForkingPickler` -# - - -def reduce_connection(conn): - rh = reduce_handle(conn.fileno()) - return rebuild_connection, (rh, conn.readable, conn.writable) - - -def rebuild_connection(reduced_handle, readable, writable): - handle = rebuild_handle(reduced_handle) - return _billiard.Connection( - handle, readable=readable, writable=writable - ) - -ForkingPickler.register(_billiard.Connection, reduce_connection) - -# -# Register `socket.socket` with `ForkingPickler` -# - - -def fromfd(fd, family, type_, proto=0): - s = socket.fromfd(fd, family, type_, proto) - if s.__class__ is not socket.socket: - s = socket.socket(_sock=s) - return s - - -def reduce_socket(s): - reduced_handle = reduce_handle(s.fileno()) - return rebuild_socket, (reduced_handle, s.family, s.type, s.proto) - - -def 
rebuild_socket(reduced_handle, family, type_, proto): - fd = rebuild_handle(reduced_handle) - _sock = fromfd(fd, family, type_, proto) - close(fd) - return _sock -ForkingPickler.register(socket.socket, reduce_socket) - -# -# Register `_billiard.PipeConnection` with `ForkingPickler` -# - -if sys.platform == 'win32': - - def reduce_pipe_connection(conn): - rh = reduce_handle(conn.fileno()) - return rebuild_pipe_connection, (rh, conn.readable, conn.writable) - - def rebuild_pipe_connection(reduced_handle, readable, writable): - handle = rebuild_handle(reduced_handle) - return _billiard.PipeConnection( - handle, readable=readable, writable=writable - ) - ForkingPickler.register(_billiard.PipeConnection, reduce_pipe_connection) +sys.modules[__name__] = reduction diff --git a/awx/lib/site-packages/billiard/sharedctypes.py b/awx/lib/site-packages/billiard/sharedctypes.py index fc676d0ebb..e336c80b13 100644 --- a/awx/lib/site-packages/billiard/sharedctypes.py +++ b/awx/lib/site-packages/billiard/sharedctypes.py @@ -12,7 +12,9 @@ import ctypes import weakref from . import heap, RLock -from .forking import assert_spawning, ForkingPickler +from .five import int_types +from .forking import assert_spawning +from .reduction import ForkingPickler __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] @@ -48,7 +50,7 @@ def RawArray(typecode_or_type, size_or_initializer): Returns a ctypes array allocated from shared memory ''' type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) - if isinstance(size_or_initializer, (int, long)): + if isinstance(size_or_initializer, int_types): type_ = type_ * size_or_initializer obj = _new_value(type_) ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) @@ -66,7 +68,8 @@ def Value(typecode_or_type, *args, **kwds): ''' lock = kwds.pop('lock', None) if kwds: - raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys()) + raise ValueError( + 'unrecognized keyword argument(s): %s' % list(kwds.keys())) obj = RawValue(typecode_or_type, *args) if lock is False: return obj @@ -83,7 +86,8 @@ def Array(typecode_or_type, size_or_initializer, **kwds): ''' lock = kwds.pop('lock', None) if kwds: - raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys()) + raise ValueError( + 'unrecognized keyword argument(s): %s' % list(kwds.keys())) obj = RawArray(typecode_or_type, size_or_initializer) if lock is False: return obj diff --git a/awx/lib/site-packages/billiard/synchronize.py b/awx/lib/site-packages/billiard/synchronize.py index c487699888..0583a17362 100644 --- a/awx/lib/site-packages/billiard/synchronize.py +++ b/awx/lib/site-packages/billiard/synchronize.py @@ -19,9 +19,8 @@ import sys import threading -from time import time as _time - from ._ext import _billiard, ensure_SemLock +from .five import range, monotonic from .process import current_process from .util import Finalize, register_after_fork, debug from .forking import assert_spawning, Popen @@ -36,7 +35,7 @@ ensure_SemLock() # Constants # -RECURSIVE_MUTEX, SEMAPHORE = range(2) +RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) SEM_VALUE_MAX = _billiard.SemLock.SEM_VALUE_MAX try: @@ -115,7 +114,7 @@ class SemLock(object): @staticmethod def _make_name(): return '/%s-%s-%s' % (current_process()._semprefix, - os.getpid(), SemLock._counter.next()) + os.getpid(), next(SemLock._counter)) class Semaphore(SemLock): @@ -248,7 +247,7 @@ class Condition(object): # release lock count = self._lock._semlock._count() - for i in xrange(count): + for i in range(count): 
self._lock.release() try: @@ -259,7 +258,7 @@ class Condition(object): self._woken_count.release() # reacquire lock - for i in xrange(count): + for i in range(count): self._lock.acquire() return ret @@ -296,7 +295,7 @@ class Condition(object): sleepers += 1 if sleepers: - for i in xrange(sleepers): + for i in range(sleepers): self._woken_count.acquire() # wait for a sleeper to wake # rezero wait_semaphore in case some timeouts just happened @@ -308,13 +307,13 @@ class Condition(object): if result: return result if timeout is not None: - endtime = _time() + timeout + endtime = monotonic() + timeout else: endtime = None waittime = None while not result: if endtime is not None: - waittime = endtime - _time() + waittime = endtime - monotonic() if waittime <= 0: break self.wait(waittime) diff --git a/awx/lib/site-packages/billiard/tests/__init__.py b/awx/lib/site-packages/billiard/tests/__init__.py index 1ed389a005..a87fce1c5f 100644 --- a/awx/lib/site-packages/billiard/tests/__init__.py +++ b/awx/lib/site-packages/billiard/tests/__init__.py @@ -13,6 +13,9 @@ def teardown(): except (AttributeError, ImportError): pass - atexit._exithandlers[:] = [ - e for e in atexit._exithandlers if e[0] not in cancelled - ] + try: + atexit._exithandlers[:] = [ + e for e in atexit._exithandlers if e[0] not in cancelled + ] + except AttributeError: + pass diff --git a/awx/lib/site-packages/billiard/tests/test_common.py b/awx/lib/site-packages/billiard/tests/test_common.py index e141dd93d4..8fef4d1494 100644 --- a/awx/lib/site-packages/billiard/tests/test_common.py +++ b/awx/lib/site-packages/billiard/tests/test_common.py @@ -1,5 +1,4 @@ from __future__ import absolute_import -from __future__ import with_statement import os import signal diff --git a/awx/lib/site-packages/billiard/tests/utils.py b/awx/lib/site-packages/billiard/tests/utils.py index 25c6d23294..1ac881f2da 100644 --- a/awx/lib/site-packages/billiard/tests/utils.py +++ b/awx/lib/site-packages/billiard/tests/utils.py @@ -1,5 +1,4 @@ from __future__ import absolute_import -from __future__ import with_statement import re import sys @@ -13,6 +12,8 @@ except AttributeError: import unittest2 as unittest # noqa from unittest2.util import safe_repr, unorderable_list_difference # noqa +from billiard.five import string_t, items, values + from .compat import catch_warnings # -- adds assertWarns from recent unittest2, not in Python 2.7. @@ -25,7 +26,7 @@ class _AssertRaisesBaseContext(object): self.expected = expected self.failureException = test_case.failureException self.obj_name = None - if isinstance(expected_regex, basestring): + if isinstance(expected_regex, string_t): expected_regex = re.compile(expected_regex) self.expected_regex = expected_regex @@ -37,7 +38,7 @@ class _AssertWarnsContext(_AssertRaisesBaseContext): # The __warningregistry__'s need to be in a pristine state for tests # to work properly. 
warnings.resetwarnings() - for v in sys.modules.values(): + for v in values(sys.modules): if getattr(v, '__warningregistry__', None): v.__warningregistry__ = {} self.warnings_manager = catch_warnings(record=True) @@ -93,7 +94,7 @@ class Case(unittest.TestCase): def assertDictContainsSubset(self, expected, actual, msg=None): missing, mismatched = [], [] - for key, value in expected.iteritems(): + for key, value in items(expected): if key not in actual: missing.append(key) elif value != actual[key]: diff --git a/awx/lib/site-packages/billiard/util.py b/awx/lib/site-packages/billiard/util.py index 76c8431a6f..8b5e4c3122 100644 --- a/awx/lib/site-packages/billiard/util.py +++ b/awx/lib/site-packages/billiard/util.py @@ -10,16 +10,25 @@ from __future__ import absolute_import import errno import functools -import itertools -import weakref import atexit -import shutil -import tempfile -import threading # we want threading to install its - # cleanup function before multiprocessing does + +from multiprocessing.util import ( # noqa + _afterfork_registry, + _afterfork_counter, + _exit_function, + _finalizer_registry, + _finalizer_counter, + Finalize, + ForkAwareLocal, + ForkAwareThreadLock, + get_temp_dir, + is_exiting, + register_after_fork, + _run_after_forkers, + _run_finalizers, +) from .compat import get_errno -from .process import current_process, active_children __all__ = [ 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', @@ -45,17 +54,6 @@ DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' _logger = None _log_to_stderr = False -#: Support for reinitialization of objects when bootstrapping a child process -_afterfork_registry = weakref.WeakValueDictionary() -_afterfork_counter = itertools.count() - -#: Finalization using weakrefs -_finalizer_registry = {} -_finalizer_counter = itertools.count() - -#: set to true if the process is shutting down. 
-_exiting = False - def sub_debug(msg, *args, **kwargs): if _logger: @@ -138,195 +136,6 @@ def log_to_stderr(level=None): return _logger -def get_temp_dir(): - ''' - Function returning a temp directory which will be removed on exit - ''' - # get name of a temp directory which will be automatically cleaned up - if current_process()._tempdir is None: - tempdir = tempfile.mkdtemp(prefix='pymp-') - info('created temp directory %s', tempdir) - Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100) - current_process()._tempdir = tempdir - return current_process()._tempdir - - -def _run_after_forkers(): - items = list(_afterfork_registry.items()) - items.sort() - for (index, ident, func), obj in items: - try: - func(obj) - except Exception, e: - info('after forker raised exception %s', e) - - -def register_after_fork(obj, func): - _afterfork_registry[(_afterfork_counter.next(), id(obj), func)] = obj - - -class Finalize(object): - ''' - Class which supports object finalization using weakrefs - ''' - def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): - assert exitpriority is None or type(exitpriority) is int - - if obj is not None: - self._weakref = weakref.ref(obj, self) - else: - assert exitpriority is not None - - self._callback = callback - self._args = args - self._kwargs = kwargs or {} - self._key = (exitpriority, _finalizer_counter.next()) - - _finalizer_registry[self._key] = self - - def __call__(self, wr=None, - # Need to bind these locally because the globals - # could've been cleared at shutdown - _finalizer_registry=_finalizer_registry, - sub_debug=sub_debug): - ''' - Run the callback unless it has already been called or cancelled - ''' - try: - del _finalizer_registry[self._key] - except KeyError: - sub_debug('finalizer no longer registered') - else: - sub_debug( - 'finalizer calling %s with args %s and kwargs %s', - self._callback, self._args, self._kwargs, - ) - res = self._callback(*self._args, **self._kwargs) - self._weakref = self._callback = self._args = \ - self._kwargs = self._key = None - return res - - def cancel(self): - ''' - Cancel finalization of the object - ''' - try: - del _finalizer_registry[self._key] - except KeyError: - pass - else: - self._weakref = self._callback = self._args = \ - self._kwargs = self._key = None - - def still_active(self): - ''' - Return whether this finalizer is still waiting to invoke callback - ''' - return self._key in _finalizer_registry - - def __repr__(self): - try: - obj = self._weakref() - except (AttributeError, TypeError): - obj = None - - if obj is None: - return '<Finalize object, dead>' - - x = '<Finalize object, callback=%s' % \ - getattr(self._callback, '__name__', self._callback) - if self._args: - x += ', args=' + str(self._args) - if self._kwargs: - x += ', kwargs=' + str(self._kwargs) - if self._key[0] is not None: - x += ', exitpriority=' + str(self._key[0]) - return x + '>' - - -def _run_finalizers(minpriority=None, - _finalizer_registry=_finalizer_registry, - sub_debug=sub_debug, error=error): - ''' - Run all finalizers whose exit priority is not None and at least minpriority - - Finalizers with highest priority are called first; finalizers with - the same priority will be called in reverse order of creation. 
- ''' - if minpriority is None: - f = lambda p: p[0][0] is not None - else: - f = lambda p: p[0][0] is not None and p[0][0] >= minpriority - - items = [x for x in _finalizer_registry.items() if f(x)] - items.sort(reverse=True) - - for key, finalizer in items: - sub_debug('calling %s', finalizer) - try: - finalizer() - except Exception: - if not error("Error calling finalizer %r", finalizer, - exc_info=True): - import traceback - traceback.print_exc() - - if minpriority is None: - _finalizer_registry.clear() - - -def is_exiting(): - ''' - Returns true if the process is shutting down - ''' - return _exiting or _exiting is None - - -def _exit_function(info=info, debug=debug, - active_children=active_children, - _run_finalizers=_run_finalizers): - ''' - Clean up on exit - ''' - - global _exiting - - info('process shutting down') - debug('running all "atexit" finalizers with priority >= 0') - _run_finalizers(0) - - for p in active_children(): - if p._daemonic: - info('calling terminate() for daemon %s', p.name) - p._popen.terminate() - - for p in active_children(): - info('calling join() for process %s', p.name) - p.join() - - debug('running the remaining "atexit" finalizers') - _run_finalizers() -atexit.register(_exit_function) - - -class ForkAwareThreadLock(object): - - def __init__(self): - self._lock = threading.Lock() - self.acquire = self._lock.acquire - self.release = self._lock.release - register_after_fork(self, ForkAwareThreadLock.__init__) - - -class ForkAwareLocal(threading.local): - - def __init__(self): - register_after_fork(self, lambda obj: obj.__dict__.clear()) - - def __reduce__(self): - return type(self), () - - def _eintr_retry(func): ''' Automatic retry after EINTR. @@ -337,7 +146,7 @@ def _eintr_retry(func): while 1: try: return func(*args, **kwargs) - except OSError, exc: + except OSError as exc: if get_errno(exc) != errno.EINTR: raise return wrapped diff --git a/awx/lib/site-packages/boto/__init__.py b/awx/lib/site-packages/boto/__init__.py index 2c164ded00..786f0f858c 100644 --- a/awx/lib/site-packages/boto/__init__.py +++ b/awx/lib/site-packages/boto/__init__.py @@ -36,7 +36,7 @@ import logging.config import urlparse from boto.exception import InvalidUriError -__version__ = '2.13.3' +__version__ = '2.17.0' Version = __version__ # for backware compatibility UserAgent = 'Boto/%s Python/%s %s/%s' % ( @@ -721,6 +721,29 @@ def connect_support(aws_access_key_id=None, ) +def connect_cloudtrail(aws_access_key_id=None, + aws_secret_access_key=None, + **kwargs): + """ + Connect to AWS CloudTrail + + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.cloudtrail.layer1.CloudtrailConnection` + :return: A connection to the AWS Cloudtrail service + """ + from boto.cloudtrail.layer1 import CloudTrailConnection + return CloudTrailConnection( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + **kwargs + ) + + def storage_uri(uri_str, default_scheme='file', debug=0, validate=True, bucket_storage_uri_class=BucketStorageUri, suppress_consec_slashes=True, is_latest=False): diff --git a/awx/lib/site-packages/boto/auth.py b/awx/lib/site-packages/boto/auth.py index f9426d5621..0d4221d619 100644 --- a/awx/lib/site-packages/boto/auth.py +++ b/awx/lib/site-packages/boto/auth.py @@ -431,13 +431,17 @@ class HmacAuthV4Handler(AuthHandler, HmacKeys): parts = http_request.host.split('.') if self.region_name is 
not None: region_name = self.region_name - elif parts[1] == 'us-gov': - region_name = 'us-gov-west-1' - else: - if len(parts) == 3: - region_name = 'us-east-1' + elif len(parts) > 1: + if parts[1] == 'us-gov': + region_name = 'us-gov-west-1' else: - region_name = parts[1] + if len(parts) == 3: + region_name = 'us-east-1' + else: + region_name = parts[1] + else: + region_name = parts[0] + if self.service_name is not None: service_name = self.service_name else: diff --git a/awx/lib/site-packages/boto/cloudsearch/document.py b/awx/lib/site-packages/boto/cloudsearch/document.py index c799d70787..1f17026fbb 100644 --- a/awx/lib/site-packages/boto/cloudsearch/document.py +++ b/awx/lib/site-packages/boto/cloudsearch/document.py @@ -191,12 +191,9 @@ class DocumentServiceConnection(object): session = requests.Session() adapter = requests.adapters.HTTPAdapter( pool_connections=20, - pool_maxsize=50 + pool_maxsize=50, + max_retries=5 ) - # Now kludge in the right number of retries. - # Once we're requiring ``requests>=1.2.1``, this can become an - # initialization parameter above. - adapter.max_retries = 5 session.mount('http://', adapter) session.mount('https://', adapter) r = session.post(url, data=sdf, headers={'Content-Type': 'application/json'}) diff --git a/awx/lib/site-packages/boto/cloudsearch/search.py b/awx/lib/site-packages/boto/cloudsearch/search.py index ece623a813..a103993ebb 100644 --- a/awx/lib/site-packages/boto/cloudsearch/search.py +++ b/awx/lib/site-packages/boto/cloudsearch/search.py @@ -79,7 +79,7 @@ class SearchResults(object): class Query(object): - + RESULTS_PER_PAGE = 500 def __init__(self, q=None, bq=None, rank=None, @@ -147,7 +147,7 @@ class Query(object): class SearchConnection(object): - + def __init__(self, domain=None, endpoint=None): self.domain = domain self.endpoint = endpoint @@ -209,7 +209,7 @@ class SearchConnection(object): :param facet_sort: Rules used to specify the order in which facet values should be returned. Allowed values are *alpha*, *count*, *max*, *sum*. Use *alpha* to sort alphabetical, and *count* to sort - the facet by number of available result. + the facet by number of available result. ``{'color': 'alpha', 'size': 'count'}`` :type facet_top_n: dict @@ -243,10 +243,10 @@ class SearchConnection(object): the search string. >>> search(bq="'Tim*'") # Return documents with words like Tim or Timothy) - + Search terms can also be combined. Allowed operators are "and", "or", "not", "field", "optional", "token", "phrase", or "filter" - + >>> search(bq="(and 'Tim' (field author 'John Smith'))") Facets allow you to show classification information about the search @@ -258,12 +258,12 @@ class SearchConnection(object): With facet_constraints, facet_top_n and facet_sort more complicated constraints can be specified such as returning the top author out of John Smith and Mark Smith who have a document with the word Tim in it. - - >>> search(q='Tim', - ... facet=['Author'], - ... facet_constraints={'author': "'John Smith','Mark Smith'"}, - ... facet=['author'], - ... facet_top_n={'author': 1}, + + >>> search(q='Tim', + ... facet=['Author'], + ... facet_constraints={'author': "'John Smith','Mark Smith'"}, + ... facet=['author'], + ... facet_top_n={'author': 1}, ... 
facet_sort={'author': 'count'}) """ @@ -300,9 +300,7 @@ class SearchConnection(object): except AttributeError: pass raise SearchServiceException('Authentication error from Amazon%s' % msg) - raise SearchServiceException("Got non-json response from Amazon") - data['query'] = query - data['search_service'] = self + raise SearchServiceException("Got non-json response from Amazon. %s" % r.content, query) if 'messages' in data and 'error' in data: for m in data['messages']: @@ -311,7 +309,10 @@ class SearchConnection(object): "=> %s" % (params, m['message']), query) elif 'error' in data: raise SearchServiceException("Unknown error processing search %s" - % (params), query) + % json.dumps(data), query) + + data['query'] = query + data['search_service'] = self return SearchResults(**data) diff --git a/awx/lib/site-packages/boto/cloudtrail/__init__.py b/awx/lib/site-packages/boto/cloudtrail/__init__.py new file mode 100644 index 0000000000..836f57fcaf --- /dev/null +++ b/awx/lib/site-packages/boto/cloudtrail/__init__.py @@ -0,0 +1,48 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. +# All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +from boto.regioninfo import RegionInfo + + +def regions(): + """ + Get all available regions for the AWS Cloudtrail service. + + :rtype: list + :return: A list of :class:`boto.regioninfo.RegionInfo` + """ + from boto.cloudtrail.layer1 import CloudTrailConnection + + return [RegionInfo(name='us-east-1', + endpoint='cloudtrail.us-east-1.amazonaws.com', + connection_cls=CloudTrailConnection), + RegionInfo(name='us-west-2', + endpoint='cloudtrail.us-west-2.amazonaws.com', + connection_cls=CloudTrailConnection), + ] + + +def connect_to_region(region_name, **kw_params): + for region in regions(): + if region.name == region_name: + return region.connect(**kw_params) + return None diff --git a/awx/lib/site-packages/boto/cloudtrail/exceptions.py b/awx/lib/site-packages/boto/cloudtrail/exceptions.py new file mode 100644 index 0000000000..35c5c3d31d --- /dev/null +++ b/awx/lib/site-packages/boto/cloudtrail/exceptions.py @@ -0,0 +1,86 @@ +""" +Exceptions that are specific to the cloudtrail module. +""" +from boto.exception import BotoServerError + + +class InvalidSnsTopicNameException(BotoServerError): + """ + Raised when an invalid SNS topic name is passed to Cloudtrail. + """ + pass + + +class InvalidS3BucketNameException(BotoServerError): + """ + Raised when an invalid S3 bucket name is passed to Cloudtrail. 
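+
+    A hedged handling sketch (the connection object ``conn`` and the
+    trail settings are hypothetical; the fault-name mapping that routes
+    a service error to this class lives in
+    ``layer1.CloudTrailConnection._faults``)::
+
+        >>> from boto.cloudtrail import exceptions
+        >>> try:
+        ...     conn.create_trail(trail={'Name': 'mytrail',
+        ...                              'S3BucketName': 'bad bucket name'})
+        ... except exceptions.InvalidS3BucketNameException:
+        ...     pass  # pick a DNS-compliant bucket name and retry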
+ """ + pass + + +class TrailAlreadyExistsException(BotoServerError): + """ + Raised when the given trail name already exists. + """ + pass + +class InsufficientSnsTopicPolicyException(BotoServerError): + """ + Raised when the SNS topic does not allow Cloudtrail to post + messages. + """ + pass + +class InvalidTrailNameException(BotoServerError): + """ + Raised when the trail name is invalid. + """ + pass + +class InternalErrorException(BotoServerError): + """ + Raised when there was an internal Cloudtrail error. + """ + pass + +class TrailNotFoundException(BotoServerError): + """ + Raised when the given trail name is not found. + """ + pass + + +class S3BucketDoesNotExistException(BotoServerError): + """ + Raised when the given S3 bucket does not exist. + """ + pass + + +class TrailNotProvidedException(BotoServerError): + """ + Raised when no trail name was provided. + """ + pass + + +class InvalidS3PrefixException(BotoServerError): + """ + Raised when an invalid key prefix is given. + """ + pass + + +class MaximumNumberOfTrailsExceededException(BotoServerError): + """ + Raised when no more trails can be created. + """ + pass + + +class InsufficientS3BucketPolicyException(BotoServerError): + """ + Raised when the S3 bucket does not allow Cloudtrail to + write files into the prefix. + """ + pass diff --git a/awx/lib/site-packages/boto/cloudtrail/layer1.py b/awx/lib/site-packages/boto/cloudtrail/layer1.py new file mode 100644 index 0000000000..e1e2145338 --- /dev/null +++ b/awx/lib/site-packages/boto/cloudtrail/layer1.py @@ -0,0 +1,309 @@ +# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# + +try: + import json +except ImportError: + import simplejson as json + +import boto +from boto.connection import AWSQueryConnection +from boto.regioninfo import RegionInfo +from boto.exception import JSONResponseError +from boto.cloudtrail import exceptions + + +class CloudTrailConnection(AWSQueryConnection): + """ + AWS Cloud Trail + This is the CloudTrail API Reference. It provides descriptions of + actions, data types, common parameters, and common errors for + CloudTrail. + + CloudTrail is a web service that records AWS API calls for your + AWS account and delivers log files to an Amazon S3 bucket. The + recorded information includes the identity of the user, the start + time of the event, the source IP address, the request parameters, + and the response elements returned by the service. 
+ + As an alternative to using the API, you can use one of the AWS + SDKs, which consist of libraries and sample code for various + programming languages and platforms (Java, Ruby, .NET, iOS, + Android, etc.). The SDKs provide a convenient way to create + programmatic access to AWSCloudTrail. For example, the SDKs take + care of cryptographically signing requests, managing errors, and + retrying requests automatically. For information about the AWS + SDKs, including how to download and install them, see the Tools + for Amazon Web Services page. + + See the CloudTrail User Guide for information about the data that + is included with each event listed in the log files. + """ + APIVersion = "2013-11-01" + DefaultRegionName = "us-east-1" + DefaultRegionEndpoint = "cloudtrail.us-east-1.amazonaws.com" + ServiceName = "CloudTrail" + TargetPrefix = "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101" + ResponseError = JSONResponseError + + _faults = { + "InvalidSnsTopicNameException": exceptions.InvalidSnsTopicNameException, + "InvalidS3BucketNameException": exceptions.InvalidS3BucketNameException, + "TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException, + "InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException, + "InvalidTrailNameException": exceptions.InvalidTrailNameException, + "InternalErrorException": exceptions.InternalErrorException, + "TrailNotFoundException": exceptions.TrailNotFoundException, + "S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException, + "TrailNotProvidedException": exceptions.TrailNotProvidedException, + "InvalidS3PrefixException": exceptions.InvalidS3PrefixException, + "MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException, + "InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException, + } + + + def __init__(self, **kwargs): + region = kwargs.pop('region', None) + if not region: + region = RegionInfo(self, self.DefaultRegionName, + self.DefaultRegionEndpoint) + + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + + AWSQueryConnection.__init__(self, **kwargs) + self.region = region + + def _required_auth_capability(self): + return ['hmac-v4'] + + def create_trail(self, trail=None): + """ + From the command line, use create-subscription. + + Creates a trail that specifies the settings for delivery of + log data to an Amazon S3 bucket. The request includes a Trail + structure that specifies the following: + + + + Trail name. + + The name of the Amazon S3 bucket to which CloudTrail + delivers your log files. + + The name of the Amazon S3 key prefix that precedes each log + file. + + The name of the Amazon SNS topic that notifies you that a + new file is available in your bucket. + + Whether the log file should include events from global + services. Currently, the only events included in CloudTrail + log files are from IAM and AWS STS. + + + Returns the appropriate HTTP status code if successful. If + not, it returns either one of the CommonErrors or a + FrontEndException with one of the following error codes: + + **MaximumNumberOfTrailsExceeded** + + An attempt was made to create more trails than allowed. You + can only create one trail for each account in each region. + + **TrailAlreadyExists** + + At attempt was made to create a trail with a name that already + exists. + + **S3BucketDoesNotExist** + + Specified Amazon S3 bucket does not exist. 
+ + **InsufficientS3BucketPolicy** + + Policy on Amazon S3 bucket does not permit CloudTrail to write + to your bucket. See the AWS AWS CloudTrail User Guide for the + required bucket policy. + + **InsufficientSnsTopicPolicy** + + The policy on Amazon SNS topic does not permit CloudTrail to + write to it. Can also occur when an Amazon SNS topic does not + exist. + + :type trail: dict + :param trail: Contains the Trail structure that specifies the settings + for each trail. + + """ + params = {} + if trail is not None: + params['trail'] = trail + return self.make_request(action='CreateTrail', + body=json.dumps(params)) + + def delete_trail(self, name=None): + """ + Deletes a trail. + + :type name: string + :param name: The name of a trail to be deleted. + + """ + params = {} + if name is not None: + params['Name'] = name + return self.make_request(action='DeleteTrail', + body=json.dumps(params)) + + def describe_trails(self, trail_name_list=None): + """ + Retrieves the settings for some or all trails associated with + an account. Returns a list of Trail structures in JSON format. + + :type trail_name_list: list + :param trail_name_list: The list of Trail object names. + + """ + params = {} + if trail_name_list is not None: + params['trailNameList'] = trail_name_list + return self.make_request(action='DescribeTrails', + body=json.dumps(params)) + + def get_trail_status(self, name=None): + """ + Returns GetTrailStatusResult, which contains a JSON-formatted + list of information about the trail specified in the request. + JSON fields include information such as delivery errors, + Amazon SNS and Amazon S3 errors, and times that logging + started and stopped for each trail. + + :type name: string + :param name: The name of the trail for which you are requesting the + current status. + + """ + params = {} + if name is not None: + params['Name'] = name + return self.make_request(action='GetTrailStatus', + body=json.dumps(params)) + + def start_logging(self, name=None): + """ + Starts the processing of recording user activity events and + log file delivery for a trail. + + :type name: string + :param name: The name of the Trail for which CloudTrail logs events. + + """ + params = {} + if name is not None: + params['Name'] = name + return self.make_request(action='StartLogging', + body=json.dumps(params)) + + def stop_logging(self, name=None): + """ + Suspends the recording of user activity events and log file + delivery for the specified trail. Under most circumstances, + there is no need to use this action. You can update a trail + without stopping it first. This action is the only way to stop + logging activity. + + :type name: string + :param name: Communicates to CloudTrail the name of the Trail for which + to stop logging events. + + """ + params = {} + if name is not None: + params['Name'] = name + return self.make_request(action='StopLogging', + body=json.dumps(params)) + + def update_trail(self, trail=None): + """ + From the command line, use update-subscription. + + Updates the settings that specify delivery of log files. + Changes to a trail do not require stopping the CloudTrail + service. You can use this action to designate an existing + bucket for log delivery, or to create a new bucket and prefix. + If the existing bucket has previously been a target for + CloudTrail log files, an IAM policy exists for the bucket. If + you create a new bucket using UpdateTrail, you need to apply + the policy to the bucket using one of the means provided by + the Amazon S3 service. 
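+
+        A short sketch of the call (connection, trail, and bucket names
+        are hypothetical; the ``trail`` dict keys are assumed to follow
+        the same shape used for create_trail)::
+
+            >>> conn.update_trail(trail={'Name': 'mytrail',
+            ...                          'S3BucketName': 'other-log-bucket'})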
+ + The request includes a Trail structure that specifies the + following: + + + + Trail name. + + The name of the Amazon S3 bucket to which CloudTrail + delivers your log files. + + The name of the Amazon S3 key prefix that precedes each log + file. + + The name of the Amazon SNS topic that notifies you that a + new file is available in your bucket. + + Whether the log file should include events from global + services, such as IAM or AWS STS. + + **CreateTrail** returns the appropriate HTTP status code if + successful. If not, it returns either one of the common errors + or one of the exceptions listed at the end of this page. + + :type trail: dict + :param trail: Represents the Trail structure that contains the + CloudTrail setting for an account. + + """ + params = {} + if trail is not None: + params['trail'] = trail + return self.make_request(action='UpdateTrail', + body=json.dumps(params)) + + def make_request(self, action, body): + headers = { + 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), + 'Host': self.region.endpoint, + 'Content-Type': 'application/x-amz-json-1.1', + 'Content-Length': str(len(body)), + } + http_request = self.build_base_http_request( + method='POST', path='/', auth_path='/', params={}, + headers=headers, data=body) + response = self._mexe(http_request, sender=None, + override_num_retries=10) + response_body = response.read() + boto.log.debug(response_body) + if response.status == 200: + if response_body: + return json.loads(response_body) + else: + json_body = json.loads(response_body) + fault_name = json_body.get('__type', None) + exception_class = self._faults.get(fault_name, self.ResponseError) + raise exception_class(response.status, response.reason, + body=json_body) + diff --git a/awx/lib/site-packages/boto/connection.py b/awx/lib/site-packages/boto/connection.py index 950a8fe703..78c7a22370 100644 --- a/awx/lib/site-packages/boto/connection.py +++ b/awx/lib/site-packages/boto/connection.py @@ -101,7 +101,7 @@ DEFAULT_CA_CERTS_FILE = os.path.join(os.path.dirname(os.path.abspath(boto.cacert class HostConnectionPool(object): """ - A pool of connections for one remote (host,is_secure). + A pool of connections for one remote (host,port,is_secure). When connections are added to the pool, they are put into a pending queue. The _mexe method returns connections to the pool @@ -145,7 +145,7 @@ class HostConnectionPool(object): def get(self): """ Returns the next connection in this pool that is ready to be - reused. Returns None of there aren't any. + reused. Returns None if there aren't any. """ # Discard ready connections that are too old. self.clean() @@ -234,7 +234,7 @@ class ConnectionPool(object): STALE_DURATION = 60.0 def __init__(self): - # Mapping from (host,is_secure) to HostConnectionPool. + # Mapping from (host,port,is_secure) to HostConnectionPool. # If a pool becomes empty, it is removed. self.host_to_pool = {} # The last time the pool was cleaned. @@ -259,7 +259,7 @@ class ConnectionPool(object): """ return sum(pool.size() for pool in self.host_to_pool.values()) - def get_http_connection(self, host, is_secure): + def get_http_connection(self, host, port, is_secure): """ Gets a connection from the pool for the named host. Returns None if there is no connection that can be reused. 
It's the caller's @@ -268,18 +268,18 @@ """ self.clean() with self.mutex: - key = (host, is_secure) + key = (host, port, is_secure) if key not in self.host_to_pool: return None return self.host_to_pool[key].get() - def put_http_connection(self, host, is_secure, conn): + def put_http_connection(self, host, port, is_secure, conn): """ Adds a connection to the pool of connections that can be reused for the named host. """ with self.mutex: - key = (host, is_secure) + key = (host, port, is_secure) if key not in self.host_to_pool: self.host_to_pool[key] = HostConnectionPool() self.host_to_pool[key].put(conn) @@ -486,6 +486,11 @@ class AWSAuthConnection(object): "2.6 or later.") self.ca_certificates_file = config.get_value( 'Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE) + if port: + self.port = port + else: + self.port = PORTS_BY_SECURITY[is_secure] + self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass) # define exceptions from httplib that we want to catch and retry self.http_exceptions = (httplib.HTTPException, socket.error, @@ -513,10 +518,6 @@ class AWSAuthConnection(object): if not isinstance(debug, (int, long)): debug = 0 self.debug = config.getint('Boto', 'debug', debug) - if port: - self.port = port - else: - self.port = PORTS_BY_SECURITY[is_secure] self.host_header = None # Timeout used to tell httplib how long to wait for socket timeouts. @@ -551,7 +552,7 @@ class AWSAuthConnection(object): self.host_header = self.provider.host_header self._pool = ConnectionPool() - self._connection = (self.server_name(), self.is_secure) + self._connection = (self.host, self.port, self.is_secure) self._last_rs = None self._auth_handler = auth.get_auth_handler( host, config, self.provider, self._required_auth_capability()) @@ -652,7 +653,7 @@ class AWSAuthConnection(object): if 'http_proxy' in os.environ and not self.proxy: pattern = re.compile( '(?:http://)?' \ - '(?:(?P<user>\w+):(?P<pass>.*)@)?' \ + '(?:(?P<user>[\w\-\.]+):(?P<pass>.*)@)?' \ '(?P<host>[\w\-\.]+)' \ '(?::(?P<port>\d+))?' 
) @@ -680,12 +681,12 @@ class AWSAuthConnection(object): self.no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '') self.use_proxy = (self.proxy != None) - def get_http_connection(self, host, is_secure): - conn = self._pool.get_http_connection(host, is_secure) + def get_http_connection(self, host, port, is_secure): + conn = self._pool.get_http_connection(host, port, is_secure) if conn is not None: return conn else: - return self.new_http_connection(host, is_secure) + return self.new_http_connection(host, port, is_secure) def skip_proxy(self, host): if not self.no_proxy: @@ -703,16 +704,29 @@ class AWSAuthConnection(object): return False - def new_http_connection(self, host, is_secure): - if self.use_proxy and not is_secure and \ - not self.skip_proxy(host): - host = '%s:%d' % (self.proxy, int(self.proxy_port)) + def new_http_connection(self, host, port, is_secure): if host is None: host = self.server_name() + + # Make sure the host is really just the host, not including + # the port number + host = host.split(':', 1)[0] + + http_connection_kwargs = self.http_connection_kwargs.copy() + + # Connection factories below expect a port keyword argument + http_connection_kwargs['port'] = port + + # Override host with proxy settings if needed + if self.use_proxy and not is_secure and \ + not self.skip_proxy(host): + host = self.proxy + http_connection_kwargs['port'] = int(self.proxy_port) + if is_secure: boto.log.debug( 'establishing HTTPS connection: host=%s, kwargs=%s', - host, self.http_connection_kwargs) + host, http_connection_kwargs) if self.use_proxy and not self.skip_proxy(host): connection = self.proxy_ssl(host, is_secure and 443 or 80) elif self.https_connection_factory: @@ -720,35 +734,35 @@ class AWSAuthConnection(object): elif self.https_validate_certificates and HAVE_HTTPS_CONNECTION: connection = https_connection.CertValidatingHTTPSConnection( host, ca_certs=self.ca_certificates_file, - **self.http_connection_kwargs) + **http_connection_kwargs) else: connection = httplib.HTTPSConnection(host, - **self.http_connection_kwargs) + **http_connection_kwargs) else: boto.log.debug('establishing HTTP connection: kwargs=%s' % - self.http_connection_kwargs) + http_connection_kwargs) if self.https_connection_factory: # even though the factory says https, this is too handy # to not be able to allow overriding for http also. connection = self.https_connection_factory(host, - **self.http_connection_kwargs) + **http_connection_kwargs) else: connection = httplib.HTTPConnection(host, - **self.http_connection_kwargs) + **http_connection_kwargs) if self.debug > 1: connection.set_debuglevel(self.debug) # self.connection must be maintained for backwards-compatibility # however, it must be dynamically pulled from the connection pool # set a private variable which will enable that if host.split(':')[0] == self.host and is_secure == self.is_secure: - self._connection = (host, is_secure) + self._connection = (host, port, is_secure) # Set the response class of the http connection to use our custom # class. 
connection.response_class = HTTPResponse return connection - def put_http_connection(self, host, is_secure, connection): - self._pool.put_http_connection(host, is_secure, connection) + def put_http_connection(self, host, port, is_secure, connection): + self._pool.put_http_connection(host, port, is_secure, connection) def proxy_ssl(self, host=None, port=None): if host and port: @@ -841,6 +855,7 @@ class AWSAuthConnection(object): boto.log.debug('Data: %s' % request.body) boto.log.debug('Headers: %s' % request.headers) boto.log.debug('Host: %s' % request.host) + boto.log.debug('Port: %s' % request.port) boto.log.debug('Params: %s' % request.params) response = None body = None @@ -850,7 +865,8 @@ class AWSAuthConnection(object): else: num_retries = override_num_retries i = 0 - connection = self.get_http_connection(request.host, self.is_secure) + connection = self.get_http_connection(request.host, request.port, + self.is_secure) while i <= num_retries: # Use binary exponential backoff to desynchronize client requests. next_sleep = random.random() * (2 ** i) @@ -858,6 +874,12 @@ class AWSAuthConnection(object): # we now re-sign each request before it is retried boto.log.debug('Token: %s' % self.provider.security_token) request.authorize(connection=self) + # Only force header for non-s3 connections, because s3 uses + # an older signing method + bucket resource URLs that include + # the port info. All others should be now be up to date and + # not include the port. + if 's3' not in self._required_auth_capability(): + request.headers['Host'] = self.host.split(':', 1)[0] if callable(sender): response = sender(connection, request.method, request.path, request.body, request.headers) @@ -880,31 +902,45 @@ class AWSAuthConnection(object): boto.log.debug(msg) time.sleep(next_sleep) continue - if response.status == 500 or response.status == 503: + if response.status in [500, 502, 503, 504]: msg = 'Received %d response. ' % response.status msg += 'Retrying in %3.1f seconds' % next_sleep boto.log.debug(msg) body = response.read() elif response.status < 300 or response.status >= 400 or \ not location: - self.put_http_connection(request.host, self.is_secure, - connection) + # don't return connection to the pool if response contains + # Connection:close header, because the connection has been + # closed and default reconnect behavior may do something + # different than new_http_connection. Also, it's probably + # less efficient to try to reuse a closed connection. + conn_header_value = response.getheader('connection') + if conn_header_value == 'close': + connection.close() + else: + self.put_http_connection(request.host, request.port, + self.is_secure, connection) return response else: scheme, request.host, request.path, \ params, query, fragment = urlparse.urlparse(location) if query: request.path += '?' 
+ query + # urlparse can return both host and port in netloc, so if + # that's the case we need to split them up properly + if ':' in request.host: + request.host, request.port = request.host.split(':', 1) msg = 'Redirecting: %s' % scheme + '://' msg += request.host + request.path boto.log.debug(msg) connection = self.get_http_connection(request.host, + request.port, scheme == 'https') response = None continue except PleaseRetryException, e: boto.log.debug('encountered a retry exception: %s' % e) - connection = self.new_http_connection(request.host, + connection = self.new_http_connection(request.host, request.port, self.is_secure) response = e.response except self.http_exceptions, e: @@ -913,10 +949,10 @@ class AWSAuthConnection(object): boto.log.debug( 'encountered unretryable %s exception, re-raising' % e.__class__.__name__) - raise e + raise boto.log.debug('encountered %s exception, reconnecting' % \ e.__class__.__name__) - connection = self.new_http_connection(request.host, + connection = self.new_http_connection(request.host, request.port, self.is_secure) time.sleep(next_sleep) i += 1 @@ -927,7 +963,7 @@ class AWSAuthConnection(object): if response: raise BotoServerError(response.status, response.reason, body) elif e: - raise e + raise else: msg = 'Please report this exception as a Boto Issue!' raise BotoClientError(msg) @@ -1006,7 +1042,7 @@ class AWSQueryConnection(AWSAuthConnection): def make_request(self, action, params=None, path='/', verb='GET'): http_request = self.build_base_http_request(verb, path, None, params, {}, '', - self.server_name()) + self.host) if action: http_request.params['Action'] = action if self.APIVersion: diff --git a/awx/lib/site-packages/boto/dynamodb/item.py b/awx/lib/site-packages/boto/dynamodb/item.py index b2b444d762..9d9290963d 100644 --- a/awx/lib/site-packages/boto/dynamodb/item.py +++ b/awx/lib/site-packages/boto/dynamodb/item.py @@ -50,11 +50,11 @@ class Item(dict): if range_key == None: range_key = attrs.get(self._range_key_name, None) self[self._range_key_name] = range_key + self._updates = {} for key, value in attrs.items(): if key != self._hash_key_name and key != self._range_key_name: self[key] = value self.consumed_units = 0 - self._updates = {} @property def hash_key(self): diff --git a/awx/lib/site-packages/boto/dynamodb/types.py b/awx/lib/site-packages/boto/dynamodb/types.py index e3b495884c..987e0d0f10 100644 --- a/awx/lib/site-packages/boto/dynamodb/types.py +++ b/awx/lib/site-packages/boto/dynamodb/types.py @@ -277,6 +277,10 @@ class Dynamizer(object): if len(attr) > 1 or not attr: return attr dynamodb_type = attr.keys()[0] + if dynamodb_type.lower() == dynamodb_type: + # It's not an actual type, just a single character attr that + # overlaps with the DDB types. Return it. 
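# Illustrative sketch (not part of the patch): under the guard above, only
# an uppercase single-key attr is treated as a DynamoDB type marker.
# A rough restatement of the rule (helper name hypothetical):
def looks_like_type_marker(attr):
    # {'S': 'foo'} -> True (decode it); {'s': 'foo'} -> False (plain attr)
    key = list(attr)[0]
    return len(attr) == 1 and key.lower() != key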
+ return attr try: decoder = getattr(self, '_decode_%s' % dynamodb_type.lower()) except AttributeError: diff --git a/awx/lib/site-packages/boto/dynamodb2/layer1.py b/awx/lib/site-packages/boto/dynamodb2/layer1.py index 039866f53b..796a6a79b6 100644 --- a/awx/lib/site-packages/boto/dynamodb2/layer1.py +++ b/awx/lib/site-packages/boto/dynamodb2/layer1.py @@ -21,7 +21,11 @@ # from binascii import crc32 -import json +try: + import json +except ImportError: + import simplejson as json + import boto from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo @@ -67,7 +71,11 @@ class DynamoDBConnection(AWSQueryConnection): if reg.name == region_name: region = reg break - kwargs['host'] = region.endpoint + + # Only set host if it isn't manually overwritten + if 'host' not in kwargs: + kwargs['host'] = region.endpoint + AWSQueryConnection.__init__(self, **kwargs) self.region = region self._validate_checksums = boto.config.getbool( @@ -1467,13 +1475,13 @@ class DynamoDBConnection(AWSQueryConnection): def make_request(self, action, body): headers = { 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), - 'Host': self.region.endpoint, + 'Host': self.host, 'Content-Type': 'application/x-amz-json-1.0', 'Content-Length': str(len(body)), } http_request = self.build_base_http_request( method='POST', path='/', auth_path='/', params={}, - headers=headers, data=body) + headers=headers, data=body, host=self.host) response = self._mexe(http_request, sender=None, override_num_retries=self.NumberRetries, retry_handler=self._retry_handler) diff --git a/awx/lib/site-packages/boto/dynamodb2/table.py b/awx/lib/site-packages/boto/dynamodb2/table.py index 5d6803ceac..b8bc47302a 100644 --- a/awx/lib/site-packages/boto/dynamodb2/table.py +++ b/awx/lib/site-packages/boto/dynamodb2/table.py @@ -418,6 +418,45 @@ class Table(object): item.load(item_data) return item + def lookup(self, *args, **kwargs): + """ + Look up an entry in DynamoDB. This is mostly backwards compatible + with boto.dynamodb. Unlike get_item, it takes hash_key and range_key first, + although you may still specify keyword arguments instead. + + Also unlike the get_item command, if the returned item has no keys + (i.e., it does not exist in DynamoDB), a None result is returned, instead + of an empty key object. + + Example:: + >>> user = users.lookup(username) + >>> user = users.lookup(username, consistent=True) + >>> app = apps.lookup('my_customer_id', 'my_app_id') + + """ + if not self.schema: + self.describe() + for x, arg in enumerate(args): + kwargs[self.schema[x].name] = arg + ret = self.get_item(**kwargs) + if not ret.keys(): + return None + return ret + + def new_item(self, *args): + """ + Returns a new, blank item + + This is mostly for consistency with boto.dynamodb + """ + if not self.schema: + self.describe() + data = {} + for x, arg in enumerate(args): + data[self.schema[x].name] = arg + return Item(self, data=data) + + def put_item(self, data, overwrite=False): """ Saves an entire item to DynamoDB. 
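# Illustrative sketch (not part of the patch): the boto.dynamodb-style
# helpers above in use (table name and key values assumed):
from boto.dynamodb2.table import Table

users = Table('users')
user = users.lookup('jdoe')            # None when the item does not exist
if user is None:
    fresh = users.new_item('jdoe')     # blank Item keyed on the hash key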
@@ -1164,4 +1203,4 @@ class BatchTable(object): self.handle_unprocessed(resp) boto.log.info( "%s unprocessed items left" % len(self._unprocessed) - ) \ No newline at end of file + ) diff --git a/awx/lib/site-packages/boto/ec2/autoscale/__init__.py b/awx/lib/site-packages/boto/ec2/autoscale/__init__.py index 9a8270dbc8..2a54adf15d 100644 --- a/awx/lib/site-packages/boto/ec2/autoscale/__init__.py +++ b/awx/lib/site-packages/boto/ec2/autoscale/__init__.py @@ -241,6 +241,10 @@ class AutoScaleConnection(AWSQueryConnection): params['EbsOptimized'] = 'true' else: params['EbsOptimized'] = 'false' + if launch_config.associate_public_ip_address is True: + params['AssociatePublicIpAddress'] = 'true' + elif launch_config.associate_public_ip_address is False: + params['AssociatePublicIpAddress'] = 'false' return self.get_object('CreateLaunchConfiguration', params, Request, verb='POST') @@ -492,15 +496,19 @@ class AutoScaleConnection(AWSQueryConnection): If no group name or list of policy names are provided, all available policies are returned. - :type as_name: str - :param as_name: The name of the + :type as_group: str + :param as_group: The name of the :class:`boto.ec2.autoscale.group.AutoScalingGroup` to filter for. - :type names: list - :param names: List of policy names which should be searched for. + :type policy_names: list + :param policy_names: List of policy names which should be searched for. :type max_records: int :param max_records: Maximum amount of groups to return. + + :type next_token: str + :param next_token: If you have more results than can be returned + at once, pass in this parameter to page through all results. """ params = {} if as_group: @@ -681,9 +689,9 @@ class AutoScaleConnection(AWSQueryConnection): Configures an Auto Scaling group to send notifications when specified events take place. - :type as_group: str or + :type autoscale_group: str or :class:`boto.ec2.autoscale.group.AutoScalingGroup` object - :param as_group: The Auto Scaling group to put notification + :param autoscale_group: The Auto Scaling group to put notification configuration on. :type topic: str @@ -692,7 +700,12 @@ class AutoScaleConnection(AWSQueryConnection): :type notification_types: list :param notification_types: The type of events that will trigger - the notification. + the notification. Valid types are: + 'autoscaling:EC2_INSTANCE_LAUNCH', + 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', + 'autoscaling:EC2_INSTANCE_TERMINATE', + 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR', + 'autoscaling:TEST_NOTIFICATION' """ name = autoscale_group @@ -704,6 +717,29 @@ class AutoScaleConnection(AWSQueryConnection): self.build_list_params(params, notification_types, 'NotificationTypes') return self.get_status('PutNotificationConfiguration', params) + def delete_notification_configuration(self, autoscale_group, topic): + """ + Deletes notifications created by put_notification_configuration. + + :type autoscale_group: str or + :class:`boto.ec2.autoscale.group.AutoScalingGroup` object + :param autoscale_group: The Auto Scaling group to put notification + configuration on. + + :type topic: str + :param topic: The Amazon Resource Name (ARN) of the Amazon Simple + Notification Service (SNS) topic. 
+ """ + + name = autoscale_group + if isinstance(autoscale_group, AutoScalingGroup): + name = autoscale_group.name + + params = {'AutoScalingGroupName': name, + 'TopicARN': topic} + + return self.get_status('DeleteNotificationConfiguration', params) + def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True): """ diff --git a/awx/lib/site-packages/boto/ec2/autoscale/group.py b/awx/lib/site-packages/boto/ec2/autoscale/group.py index e9fadce1b7..12fdb20d25 100644 --- a/awx/lib/site-packages/boto/ec2/autoscale/group.py +++ b/awx/lib/site-packages/boto/ec2/autoscale/group.py @@ -148,6 +148,9 @@ class AutoScalingGroup(object): :type vpc_zone_identifier: str :param vpc_zone_identifier: The subnet identifier of the Virtual Private Cloud. + + :type tags: list + :param tags: List of :class:`boto.ec2.autoscale.tag.Tag`s :type termination_policies: list :param termination_policies: A list of termination policies. Valid values @@ -296,12 +299,23 @@ class AutoScalingGroup(object): def put_notification_configuration(self, topic, notification_types): """ Configures an Auto Scaling group to send notifications when - specified events take place. + specified events take place. Valid notification types are: + 'autoscaling:EC2_INSTANCE_LAUNCH', + 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', + 'autoscaling:EC2_INSTANCE_TERMINATE', + 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR', + 'autoscaling:TEST_NOTIFICATION' """ return self.connection.put_notification_configuration(self, topic, notification_types) + def delete_notification_configuration(self, topic): + """ + Deletes notifications created by put_notification_configuration. + """ + return self.connection.delete_notification_configuration(self, topic) + def suspend_processes(self, scaling_processes=None): """ Suspends Auto Scaling processes for an Auto Scaling group. diff --git a/awx/lib/site-packages/boto/ec2/autoscale/launchconfig.py b/awx/lib/site-packages/boto/ec2/autoscale/launchconfig.py index f558041a2f..7e31592964 100644 --- a/awx/lib/site-packages/boto/ec2/autoscale/launchconfig.py +++ b/awx/lib/site-packages/boto/ec2/autoscale/launchconfig.py @@ -94,7 +94,8 @@ class LaunchConfiguration(object): instance_type='m1.small', kernel_id=None, ramdisk_id=None, block_device_mappings=None, instance_monitoring=False, spot_price=None, - instance_profile_name=None, ebs_optimized=False): + instance_profile_name=None, ebs_optimized=False, + associate_public_ip_address=None): """ A launch configuration. @@ -109,8 +110,9 @@ class LaunchConfiguration(object): :param key_name: The name of the EC2 key pair. :type security_groups: list - :param security_groups: Names of the security groups with which to - associate the EC2 instances. + :param security_groups: Names or security group id's of the security + groups with which to associate the EC2 instances or VPC instances, + respectively. :type user_data: str :param user_data: The user data available to launched EC2 instances. @@ -144,6 +146,10 @@ class LaunchConfiguration(object): :type ebs_optimized: bool :param ebs_optimized: Specifies whether the instance is optimized for EBS I/O (true) or not (false). + + :type associate_public_ip_address: bool + :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. + Specifies whether to assign a public IP address to each instance launched in a Amazon VPC. 
""" self.connection = connection self.name = name @@ -163,6 +169,7 @@ class LaunchConfiguration(object): self.instance_profile_name = instance_profile_name self.launch_configuration_arn = None self.ebs_optimized = ebs_optimized + self.associate_public_ip_address = associate_public_ip_address def __repr__(self): return 'LaunchConfiguration:%s' % self.name diff --git a/awx/lib/site-packages/boto/ec2/autoscale/tag.py b/awx/lib/site-packages/boto/ec2/autoscale/tag.py index ad9641d5bb..a783edf096 100644 --- a/awx/lib/site-packages/boto/ec2/autoscale/tag.py +++ b/awx/lib/site-packages/boto/ec2/autoscale/tag.py @@ -55,11 +55,11 @@ class Tag(object): self.key = value elif name == 'Value': self.value = value - elif name == 'PropogateAtLaunch': + elif name == 'PropagateAtLaunch': if value.lower() == 'true': - self.propogate_at_launch = True + self.propagate_at_launch = True else: - self.propogate_at_launch = False + self.propagate_at_launch = False elif name == 'ResourceId': self.resource_id = value elif name == 'ResourceType': diff --git a/awx/lib/site-packages/boto/ec2/cloudwatch/alarm.py b/awx/lib/site-packages/boto/ec2/cloudwatch/alarm.py index e0f72421e1..9215eb08c9 100644 --- a/awx/lib/site-packages/boto/ec2/cloudwatch/alarm.py +++ b/awx/lib/site-packages/boto/ec2/cloudwatch/alarm.py @@ -95,7 +95,7 @@ class MetricAlarm(object): statistic is applied. :type evaluation_periods: int - :param evaluation_period: The number of periods over which data is + :param evaluation_periods: The number of periods over which data is compared to the specified threshold. :type unit: str @@ -112,9 +112,16 @@ class MetricAlarm(object): :type description: str :param description: Description of MetricAlarm - :type dimensions: list of dicts - :param description: Dimensions of alarm, such as: - [{'InstanceId':['i-0123456,i-0123457']}] + :type dimensions: dict + :param dimensions: A dictionary of dimension key/values where + the key is the dimension name and the value + is either a scalar value or an iterator + of values to be associated with that + dimension. + Example: { + 'InstanceId': ['i-0123456', 'i-0123457'], + 'LoadBalancerName': 'test-lb' + } :type alarm_actions: list of strs :param alarm_actions: A list of the ARNs of the actions to take in diff --git a/awx/lib/site-packages/boto/ec2/connection.py b/awx/lib/site-packages/boto/ec2/connection.py index 71da5f0ba9..803f3fed8e 100644 --- a/awx/lib/site-packages/boto/ec2/connection.py +++ b/awx/lib/site-packages/boto/ec2/connection.py @@ -69,7 +69,7 @@ from boto.exception import EC2ResponseError class EC2Connection(AWSQueryConnection): - APIVersion = boto.config.get('Boto', 'ec2_version', '2013-07-15') + APIVersion = boto.config.get('Boto', 'ec2_version', '2013-10-01') DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1') DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint', 'ec2.us-east-1.amazonaws.com') @@ -260,7 +260,7 @@ class EC2Connection(AWSQueryConnection): def register_image(self, name=None, description=None, image_location=None, architecture=None, kernel_id=None, ramdisk_id=None, root_device_name=None, block_device_map=None, - dry_run=False): + dry_run=False, virtualization_type=None): """ Register an image. @@ -293,6 +293,12 @@ class EC2Connection(AWSQueryConnection): :type dry_run: bool :param dry_run: Set to True if the operation should not actually run. + :type virtualization_type: string + :param virtualization_type: The virutalization_type of the image. 
+ Valid choices are: + * paravirtual + * hvm + :rtype: string :return: The new image id """ @@ -315,6 +321,9 @@ class EC2Connection(AWSQueryConnection): block_device_map.ec2_build_list_params(params) if dry_run: params['DryRun'] = 'true' + if virtualization_type: + params['VirtualizationType'] = virtualization_type + rs = self.get_object('RegisterImage', params, ResultSet, verb='POST') image_id = getattr(rs, 'imageId', None) return image_id @@ -355,7 +364,8 @@ class EC2Connection(AWSQueryConnection): return result def create_image(self, instance_id, name, - description=None, no_reboot=False, dry_run=False): + description=None, no_reboot=False, + block_device_mapping=None, dry_run=False): """ Will create an AMI from the instance in the running or stopped state. @@ -377,6 +387,10 @@ class EC2Connection(AWSQueryConnection): responsibility of maintaining file system integrity is left to the owner of the instance. + :type block_device_mapping: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping` + :param block_device_mapping: A BlockDeviceMapping data structure + describing the EBS volumes associated with the Image. + :type dry_run: bool :param dry_run: Set to True if the operation should not actually run. @@ -389,6 +403,8 @@ class EC2Connection(AWSQueryConnection): params['Description'] = description if no_reboot: params['NoReboot'] = 'true' + if block_device_mapping: + block_device_mapping.ec2_build_list_params(params) if dry_run: params['DryRun'] = 'true' img = self.get_object('CreateImage', params, Image, verb='POST') @@ -1500,7 +1516,7 @@ class EC2Connection(AWSQueryConnection): if dry_run: params['DryRun'] = 'true' return self.get_list('CancelSpotInstanceRequests', params, - [('item', Instance)], verb='POST') + [('item', SpotInstanceRequest)], verb='POST') def get_spot_datafeed_subscription(self, dry_run=False): """ @@ -2189,17 +2205,17 @@ class EC2Connection(AWSQueryConnection): present, only the Snapshots associated with these snapshot ids will be returned. - :type owner: str - :param owner: If present, only the snapshots owned by the specified user + :type owner: str or list + :param owner: If present, only the snapshots owned by the specified user(s) will be returned. Valid values are: * self * amazon * AWS Account ID - :type restorable_by: str + :type restorable_by: str or list :param restorable_by: If present, only the snapshots that are restorable - by the specified account id will be returned. + by the specified account id(s) will be returned. :type filters: dict :param filters: Optional filters that can be used to limit @@ -2220,10 +2236,11 @@ class EC2Connection(AWSQueryConnection): params = {} if snapshot_ids: self.build_list_params(params, snapshot_ids, 'SnapshotId') + if owner: - params['Owner'] = owner + self.build_list_params(params, owner, 'Owner') if restorable_by: - params['RestorableBy'] = restorable_by + self.build_list_params(params, restorable_by, 'RestorableBy') if filters: self.build_filter_params(params, filters) if dry_run: diff --git a/awx/lib/site-packages/boto/ec2/elb/__init__.py b/awx/lib/site-packages/boto/ec2/elb/__init__.py index be9490526e..e5ae588655 100644 --- a/awx/lib/site-packages/boto/ec2/elb/__init__.py +++ b/awx/lib/site-packages/boto/ec2/elb/__init__.py @@ -188,13 +188,13 @@ class ELBConnection(AWSQueryConnection): (LoadBalancerPortNumber, InstancePortNumber, Protocol, InstanceProtocol, SSLCertificateId). - Where; - - LoadBalancerPortNumber and InstancePortNumber are integer - values between 1 and 65535. 
- - Protocol and InstanceProtocol is a string containing either 'TCP', - 'SSL', 'HTTP', or 'HTTPS' - - SSLCertificateId is the ARN of an SSL certificate loaded into - AWS IAM + Where: + - LoadBalancerPortNumber and InstancePortNumber are integer + values between 1 and 65535 + - Protocol and InstanceProtocol is a string containing either 'TCP', + 'SSL', 'HTTP', or 'HTTPS' + - SSLCertificateId is the ARN of an SSL certificate loaded into + AWS IAM :rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer` :return: The newly created @@ -272,13 +272,13 @@ class ELBConnection(AWSQueryConnection): (LoadBalancerPortNumber, InstancePortNumber, Protocol, InstanceProtocol, SSLCertificateId). - Where; - - LoadBalancerPortNumber and InstancePortNumber are integer - values between 1 and 65535. - - Protocol and InstanceProtocol is a string containing either 'TCP', - 'SSL', 'HTTP', or 'HTTPS' - - SSLCertificateId is the ARN of an SSL certificate loaded into - AWS IAM + Where: + - LoadBalancerPortNumber and InstancePortNumber are integer + values between 1 and 65535 + - Protocol and InstanceProtocol is a string containing either 'TCP', + 'SSL', 'HTTP', or 'HTTPS' + - SSLCertificateId is the ARN of an SSL certificate loaded into + AWS IAM :return: The status of the request """ diff --git a/awx/lib/site-packages/boto/ec2/elb/loadbalancer.py b/awx/lib/site-packages/boto/ec2/elb/loadbalancer.py index 92bc5a2c85..fde9ac1f5d 100644 --- a/awx/lib/site-packages/boto/ec2/elb/loadbalancer.py +++ b/awx/lib/site-packages/boto/ec2/elb/loadbalancer.py @@ -342,7 +342,7 @@ class LoadBalancer(object): """ if isinstance(subnets, str) or isinstance(subnets, unicode): subnets = [subnets] - new_subnets = self.connection.detach_lb_to_subnets(self.name, subnets) + new_subnets = self.connection.detach_lb_from_subnets(self.name, subnets) self.subnets = new_subnets def apply_security_groups(self, security_groups): diff --git a/awx/lib/site-packages/boto/ec2/instance.py b/awx/lib/site-packages/boto/ec2/instance.py index 254fe23075..430647e80f 100644 --- a/awx/lib/site-packages/boto/ec2/instance.py +++ b/awx/lib/site-packages/boto/ec2/instance.py @@ -340,14 +340,6 @@ class Instance(TaggedEC2Object): self.ami_launch_index = value elif name == 'previousState': self.previous_state = value - elif name == 'name': - self.state = value - elif name == 'code': - try: - self.state_code = int(value) - except ValueError: - boto.log.warning('Error converting code (%s) to int' % value) - self.state_code = value elif name == 'instanceType': self.instance_type = value elif name == 'rootDeviceName': diff --git a/awx/lib/site-packages/boto/ec2/reservedinstance.py b/awx/lib/site-packages/boto/ec2/reservedinstance.py index b96566b63b..1386c4146a 100644 --- a/awx/lib/site-packages/boto/ec2/reservedinstance.py +++ b/awx/lib/site-packages/boto/ec2/reservedinstance.py @@ -234,11 +234,12 @@ class PriceSchedule(object): class ReservedInstancesConfiguration(object): def __init__(self, connection=None, availability_zone=None, platform=None, - instance_count=None): + instance_count=None, instance_type=None): self.connection = connection self.availability_zone = availability_zone self.platform = platform self.instance_count = instance_count + self.instance_type = instance_type def startElement(self, name, attrs, connection): return None @@ -250,6 +251,8 @@ class ReservedInstancesConfiguration(object): self.platform = value elif name == 'instanceCount': self.instance_count = int(value) + elif name == 'instanceType': + self.instance_type = value else: setattr(self, 
name, value) @@ -271,12 +274,14 @@ class ModifyReservedInstancesResult(object): class ModificationResult(object): def __init__(self, connection=None, modification_id=None, - availability_zone=None, platform=None, instance_count=None): + availability_zone=None, platform=None, instance_count=None, + instance_type=None): self.connection = connection self.modification_id = modification_id self.availability_zone = availability_zone self.platform = platform self.instance_count = instance_count + self.instance_type = instance_type def startElement(self, name, attrs, connection): return None @@ -290,6 +295,8 @@ class ModificationResult(object): self.platform = value elif name == 'instanceCount': self.instance_count = int(value) + elif name == 'instanceType': + self.instance_type = value else: setattr(self, name, value) diff --git a/awx/lib/site-packages/boto/ec2/securitygroup.py b/awx/lib/site-packages/boto/ec2/securitygroup.py index f1e81cd9f1..9f437ebbc7 100644 --- a/awx/lib/site-packages/boto/ec2/securitygroup.py +++ b/awx/lib/site-packages/boto/ec2/securitygroup.py @@ -123,6 +123,9 @@ class SecurityGroup(TaggedEC2Object): only changes the local version of the object. No information is sent to EC2. """ + if not self.rules: + raise ValueError("The security group has no rules") + target_rule = None for rule in self.rules: if rule.ip_protocol == ip_protocol: @@ -136,9 +139,9 @@ class SecurityGroup(TaggedEC2Object): if grant.cidr_ip == cidr_ip: target_grant = grant if target_grant: - rule.grants.remove(target_grant, dry_run=dry_run) - if len(rule.grants) == 0: - self.rules.remove(target_rule, dry_run=dry_run) + rule.grants.remove(target_grant) + if len(rule.grants) == 0: + self.rules.remove(target_rule) def authorize(self, ip_protocol=None, from_port=None, to_port=None, cidr_ip=None, src_group=None, dry_run=False): diff --git a/awx/lib/site-packages/boto/elastictranscoder/layer1.py b/awx/lib/site-packages/boto/elastictranscoder/layer1.py index d741530a93..8799753cd6 100644 --- a/awx/lib/site-packages/boto/elastictranscoder/layer1.py +++ b/awx/lib/site-packages/boto/elastictranscoder/layer1.py @@ -387,8 +387,8 @@ class ElasticTranscoderConnection(AWSAuthConnection): :param description: A description of the preset. :type container: string - :param container: The container type for the output file. This value - must be `mp4`. + :param container: The container type for the output file. Valid values + include `mp3`, `mp4`, `ogg`, `ts`, and `webm`. 
        :type video: dict
        :param video: A section of the request body that specifies the video
diff --git a/awx/lib/site-packages/boto/emr/__init__.py b/awx/lib/site-packages/boto/emr/__init__.py
index e0cdf71207..562c582da7 100644
--- a/awx/lib/site-packages/boto/emr/__init__.py
+++ b/awx/lib/site-packages/boto/emr/__init__.py
@@ -43,25 +43,25 @@ def regions():
                endpoint='elasticmapreduce.us-east-1.amazonaws.com',
                connection_cls=EmrConnection),
     RegionInfo(name='us-west-1',
-               endpoint='elasticmapreduce.us-west-1.amazonaws.com',
+               endpoint='us-west-1.elasticmapreduce.amazonaws.com',
                connection_cls=EmrConnection),
     RegionInfo(name='us-west-2',
-               endpoint='elasticmapreduce.us-west-2.amazonaws.com',
+               endpoint='us-west-2.elasticmapreduce.amazonaws.com',
                connection_cls=EmrConnection),
     RegionInfo(name='ap-northeast-1',
-               endpoint='elasticmapreduce.ap-northeast-1.amazonaws.com',
+               endpoint='ap-northeast-1.elasticmapreduce.amazonaws.com',
                connection_cls=EmrConnection),
     RegionInfo(name='ap-southeast-1',
-               endpoint='elasticmapreduce.ap-southeast-1.amazonaws.com',
+               endpoint='ap-southeast-1.elasticmapreduce.amazonaws.com',
                connection_cls=EmrConnection),
     RegionInfo(name='ap-southeast-2',
-               endpoint='elasticmapreduce.ap-southeast-2.amazonaws.com',
+               endpoint='ap-southeast-2.elasticmapreduce.amazonaws.com',
                connection_cls=EmrConnection),
     RegionInfo(name='eu-west-1',
-               endpoint='elasticmapreduce.eu-west-1.amazonaws.com',
+               endpoint='eu-west-1.elasticmapreduce.amazonaws.com',
                connection_cls=EmrConnection),
     RegionInfo(name='sa-east-1',
-               endpoint='elasticmapreduce.sa-east-1.amazonaws.com',
+               endpoint='sa-east-1.elasticmapreduce.amazonaws.com',
                connection_cls=EmrConnection),
     ]
diff --git a/awx/lib/site-packages/boto/emr/connection.py b/awx/lib/site-packages/boto/emr/connection.py
index b0815f223c..7b1c434eb4 100644
--- a/awx/lib/site-packages/boto/emr/connection.py
+++ b/awx/lib/site-packages/boto/emr/connection.py
@@ -28,9 +28,12 @@ import types
 import boto
 import boto.utils
 from boto.ec2.regioninfo import RegionInfo
-from boto.emr.emrobject import JobFlow, RunJobFlowResponse
-from boto.emr.emrobject import AddInstanceGroupsResponse
-from boto.emr.emrobject import ModifyInstanceGroupsResponse
+from boto.emr.emrobject import AddInstanceGroupsResponse, BootstrapActionList, \
+    Cluster, ClusterSummaryList, HadoopStep, \
+    InstanceGroupList, InstanceList, JobFlow, \
+    JobFlowStepList, \
+    ModifyInstanceGroupsResponse, \
+    RunJobFlowResponse, StepSummaryList
 from boto.emr.step import JarStep
 from boto.connection import AWSQueryConnection
 from boto.exception import EmrResponseError
@@ -65,10 +68,30 @@ class EmrConnection(AWSQueryConnection):
                                     https_connection_factory, path,
                                     security_token,
                                     validate_certs=validate_certs)
+        # Many of the EMR hostnames are of the form:
+        #     <region>.<service_name>.amazonaws.com
+        # rather than the more common:
+        #     <service_name>.<region>.amazonaws.com
+        # so we need to explicitly set the region_name and service_name
+        # for the SigV4 signing.
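# Illustrative sketch (not part of the patch): the cluster-oriented calls
# added below, chained together (region and cluster states assumed):
import boto.emr

emr = boto.emr.connect_to_region('us-west-2')
page = emr.list_clusters(cluster_states=['RUNNING', 'WAITING'])
for summary in page.clusters:
    cluster = emr.describe_cluster(summary.id)   # full Cluster object
    steps = emr.list_steps(cluster.id).steps     # StepSummary members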
+ self.auth_region_name = self.region.name + self.auth_service_name = 'elasticmapreduce' def _required_auth_capability(self): return ['hmac-v4'] + def describe_cluster(self, cluster_id): + """ + Describes an Elastic MapReduce cluster + + :type cluster_id: str + :param cluster_id: The cluster id of interest + """ + params = { + 'ClusterId': cluster_id + } + return self.get_object('DescribeCluster', params, Cluster) + def describe_jobflow(self, jobflow_id): """ Describes a single Elastic MapReduce job flow @@ -111,6 +134,139 @@ class EmrConnection(AWSQueryConnection): return self.get_list('DescribeJobFlows', params, [('member', JobFlow)]) + def describe_step(self, cluster_id, step_id): + """ + Describe an Elastic MapReduce step + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type step_id: str + :param step_id: The step id of interest + """ + params = { + 'ClusterId': cluster_id, + 'StepId': step_id + } + + return self.get_object('DescribeStep', params, HadoopStep) + + def list_bootstrap_actions(self, cluster_id, marker=None): + """ + Get a list of bootstrap actions for an Elastic MapReduce cluster + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type marker: str + :param marker: Pagination marker + """ + params = { + 'ClusterId': cluster_id + } + + if marker: + params['Marker'] = marker + + return self.get_object('ListBootstrapActions', params, BootstrapActionList) + + def list_clusters(self, created_after=None, created_before=None, + cluster_states=None, marker=None): + """ + List Elastic MapReduce clusters with optional filtering + + :type created_after: datetime + :param created_after: Bound on cluster creation time + :type created_before: datetime + :param created_before: Bound on cluster creation time + :type cluster_states: list + :param cluster_states: Bound on cluster states + :type marker: str + :param marker: Pagination marker + """ + params = {} + if created_after: + params['CreatedAfter'] = created_after.strftime( + boto.utils.ISO8601) + if created_before: + params['CreatedBefore'] = created_before.strftime( + boto.utils.ISO8601) + if marker: + params['Marker'] = marker + + if cluster_states: + self.build_list_params(params, cluster_states, 'ClusterStates.member') + + return self.get_object('ListClusters', params, ClusterSummaryList) + + def list_instance_groups(self, cluster_id, marker=None): + """ + List EC2 instance groups in a cluster + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type marker: str + :param marker: Pagination marker + """ + params = { + 'ClusterId': cluster_id + } + + if marker: + params['Marker'] = marker + + return self.get_object('ListInstanceGroups', params, InstanceGroupList) + + def list_instances(self, cluster_id, instance_group_id=None, + instance_group_types=None, marker=None): + """ + List EC2 instances in a cluster + + :type cluster_id: str + :param cluster_id: The cluster id of interest + :type instance_group_id: str + :param instance_group_id: The EC2 instance group id of interest + :type instance_group_types: list + :param instance_group_types: Filter by EC2 instance group type + :type marker: str + :param marker: Pagination marker + """ + params = { + 'ClusterId': cluster_id + } + + if instance_group_id: + params['InstanceGroupId'] = instance_group_id + if marker: + params['Marker'] = marker + + if instance_group_types: + self.build_list_params(params, instance_group_types, + 'InstanceGroupTypeList.member') + + return self.get_object('ListInstances', params, 
                               InstanceList)
+
+    def list_steps(self, cluster_id, step_states=None, marker=None):
+        """
+        List cluster steps
+
+        :type cluster_id: str
+        :param cluster_id: The cluster id of interest
+        :type step_states: list
+        :param step_states: Filter by step states
+        :type marker: str
+        :param marker: Pagination marker
+        """
+        params = {
+            'ClusterId': cluster_id
+        }
+
+        if marker:
+            params['Marker'] = marker
+
+        if step_states:
+            self.build_list_params(params, step_states, 'StepStateList.member')
+
+        return self.get_object('ListSteps', params, StepSummaryList)
+
     def terminate_jobflow(self, jobflow_id):
         """
         Terminate an Elastic MapReduce job flow
@@ -150,7 +306,7 @@ class EmrConnection(AWSQueryConnection):
             params.update(self._build_step_list(step_args))

         return self.get_object(
-            'AddJobFlowSteps', params, RunJobFlowResponse, verb='POST')
+            'AddJobFlowSteps', params, JobFlowStepList, verb='POST')

     def add_instance_groups(self, jobflow_id, instance_groups):
         """
diff --git a/awx/lib/site-packages/boto/emr/emrobject.py b/awx/lib/site-packages/boto/emr/emrobject.py
index 95ca7e6f9c..caf51f7629 100644
--- a/awx/lib/site-packages/boto/emr/emrobject.py
+++ b/awx/lib/site-packages/boto/emr/emrobject.py
@@ -60,11 +60,29 @@ class Arg(EmrObject):
         self.value = value


+class StepId(Arg):
+    pass
+
+
+class JobFlowStepList(EmrObject):
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.stepids = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'StepIds':
+            self.stepids = ResultSet([('member', StepId)])
+            return self.stepids
+        else:
+            return None
+
+
 class BootstrapAction(EmrObject):
     Fields = set([
         'Args',
         'Name',
         'Path',
+        'ScriptPath',
     ])

     def startElement(self, name, attrs, connection):
@@ -174,3 +192,281 @@ class JobFlow(EmrObject):
             return self.bootstrapactions
         else:
             return None
+
+
+class ClusterTimeline(EmrObject):
+    Fields = set([
+        'CreationDateTime',
+        'ReadyDateTime',
+        'EndDateTime'
+    ])
+
+
+class ClusterStatus(EmrObject):
+    Fields = set([
+        'State',
+        'StateChangeReason',
+        'Timeline'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.timeline = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Timeline':
+            self.timeline = ClusterTimeline()
+            return self.timeline
+        else:
+            return None
+
+
+class Ec2InstanceAttributes(EmrObject):
+    Fields = set([
+        'Ec2KeyName',
+        'Ec2SubnetId',
+        'Ec2AvailabilityZone',
+        'IamInstanceProfile'
+    ])
+
+
+class Application(EmrObject):
+    Fields = set([
+        'Name',
+        'Version',
+        'Args',
+        'AdditionalInfo'
+    ])
+
+
+class Cluster(EmrObject):
+    Fields = set([
+        'Id',
+        'Name',
+        'LogUri',
+        'RequestedAmiVersion',
+        'RunningAmiVersion',
+        'AutoTerminate',
+        'TerminationProtected',
+        'VisibleToAllUsers'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.status = None
+        self.ec2instanceattributes = None
+        self.applications = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Status':
+            self.status = ClusterStatus()
+            return self.status
+        elif name == 'EC2InstanceAttributes':
+            self.ec2instanceattributes = Ec2InstanceAttributes()
+            return self.ec2instanceattributes
+        elif name == 'Applications':
+            self.applications = ResultSet([('member', Application)])
+            return self.applications
+        else:
+            return None
+
+
+class ClusterSummary(Cluster):
+    Fields = set([
+        'Id',
+        'Name'
+    ])
+
+
+class ClusterSummaryList(EmrObject):
+    Fields = set([
+        'Marker'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.clusters = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Clusters':
+            self.clusters = ResultSet([('member', ClusterSummary)])
+            return self.clusters
+        else:
+            return None
+
+
+class StepConfig(EmrObject):
+    Fields = set([
+        'Jar',
+        'MainClass'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.properties = None
+        self.args = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Properties':
+            self.properties = ResultSet([('member', KeyValue)])
+            return self.properties
+        elif name == 'Args':
+            self.args = ResultSet([('member', Arg)])
+            return self.args
+        else:
+            return None
+
+
+class HadoopStep(EmrObject):
+    Fields = set([
+        'Id',
+        'Name',
+        'ActionOnFailure'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.config = None
+        self.status = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Config':
+            self.config = StepConfig()
+            return self.config
+        elif name == 'Status':
+            self.status = ClusterStatus()
+            return self.status
+        else:
+            return None
+
+
+class InstanceGroupInfo(EmrObject):
+    Fields = set([
+        'Id',
+        'Name',
+        'Market',
+        'InstanceGroupType',
+        'BidPrice',
+        'InstanceType',
+        'RequestedInstanceCount',
+        'RunningInstanceCount'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.status = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Status':
+            self.status = ClusterStatus()
+            return self.status
+        else:
+            return None
+
+
+class InstanceGroupList(EmrObject):
+    Fields = set([
+        'Marker'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.instancegroups = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'InstanceGroups':
+            self.instancegroups = ResultSet([('member', InstanceGroupInfo)])
+            return self.instancegroups
+        else:
+            return None
+
+
+class InstanceInfo(EmrObject):
+    Fields = set([
+        'Id',
+        'Ec2InstanceId',
+        'PublicDnsName',
+        'PublicIpAddress',
+        'PrivateDnsName',
+        'PrivateIpAddress'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.status = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Status':
+            self.status = ClusterStatus()
+            return self.status
+        else:
+            return None
+
+
+class InstanceList(EmrObject):
+    Fields = set([
+        'Marker'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.instances = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Instances':
+            self.instances = ResultSet([('member', InstanceInfo)])
+            return self.instances
+        else:
+            return None
+
+
+class StepSummary(EmrObject):
+    Fields = set([
+        'Id',
+        'Name'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.status = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Status':
+            self.status = ClusterStatus()
+            return self.status
+        else:
+            return None
+
+
+class StepSummaryList(EmrObject):
+    Fields = set([
+        'Marker'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.steps = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'Steps':
+            self.steps = ResultSet([('member', StepSummary)])
+            return self.steps
+        else:
+            return None
+
+
+class BootstrapActionList(EmrObject):
+    Fields = set([
+        'Marker'
+    ])
+
+    def __init__(self, connection=None):
+        self.connection = connection
+        self.actions =
ResultSet([('member', BootstrapAction)]) + return self.actions + else: + return None diff --git a/awx/lib/site-packages/boto/glacier/__init__.py b/awx/lib/site-packages/boto/glacier/__init__.py index a65733b274..5224d3453d 100644 --- a/awx/lib/site-packages/boto/glacier/__init__.py +++ b/awx/lib/site-packages/boto/glacier/__init__.py @@ -47,6 +47,9 @@ def regions(): RegionInfo(name='eu-west-1', endpoint='glacier.eu-west-1.amazonaws.com', connection_cls=Layer2), + RegionInfo(name='ap-southeast-2', + endpoint='glacier.ap-southeast-2.amazonaws.com', + connection_cls=Layer2), ] diff --git a/awx/lib/site-packages/boto/gs/bucket.py b/awx/lib/site-packages/boto/gs/bucket.py index a8ced49a0d..9e9892588e 100644 --- a/awx/lib/site-packages/boto/gs/bucket.py +++ b/awx/lib/site-packages/boto/gs/bucket.py @@ -19,12 +19,14 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. +import re import urllib import xml.sax import boto from boto import handler from boto.resultset import ResultSet +from boto.exception import GSResponseError from boto.exception import InvalidAclError from boto.gs.acl import ACL, CannedACLStrings from boto.gs.acl import SupportedPermissions as GSPermissions @@ -41,6 +43,7 @@ DEF_OBJ_ACL = 'defaultObjectAcl' STANDARD_ACL = 'acl' CORS_ARG = 'cors' LIFECYCLE_ARG = 'lifecycle' +ERROR_DETAILS_REGEX = re.compile(r'
<Details>(?P<details>.*)</Details>
') class Bucket(S3Bucket): """Represents a Google Cloud Storage bucket.""" @@ -99,9 +102,16 @@ class Bucket(S3Bucket): if response_headers: for rk, rv in response_headers.iteritems(): query_args_l.append('%s=%s' % (rk, urllib.quote(rv))) - - key, resp = self._get_key_internal(key_name, headers, - query_args_l=query_args_l) + try: + key, resp = self._get_key_internal(key_name, headers, + query_args_l=query_args_l) + except GSResponseError, e: + if e.status == 403 and 'Forbidden' in e.reason: + # If we failed getting an object, let the user know which object + # failed rather than just returning a generic 403. + e.reason = ("Access denied to 'gs://%s/%s'." % + (self.name, key_name)) + raise return key def copy_key(self, new_key_name, src_bucket_name, src_key_name, @@ -312,6 +322,14 @@ class Bucket(S3Bucket): headers=headers) body = response.read() if response.status != 200: + if response.status == 403: + match = ERROR_DETAILS_REGEX.search(body) + details = match.group('details') if match else None + if details: + details = (('
<Details>%s. Note that Full Control access' + ' is required to access ACLs.</Details>
') % + details) + body = re.sub(ERROR_DETAILS_REGEX, details, body) raise self.connection.provider.storage_response_error( response.status, response.reason, body) return body diff --git a/awx/lib/site-packages/boto/gs/resumable_upload_handler.py b/awx/lib/site-packages/boto/gs/resumable_upload_handler.py index e1b74347e0..d3d8629775 100644 --- a/awx/lib/site-packages/boto/gs/resumable_upload_handler.py +++ b/awx/lib/site-packages/boto/gs/resumable_upload_handler.py @@ -482,7 +482,7 @@ class ResumableUploadHandler(object): # pool connections) because httplib requires a new HTTP connection per # transaction. (Without this, calling http_conn.getresponse() would get # "ResponseNotReady".) - http_conn = conn.new_http_connection(self.tracker_uri_host, + http_conn = conn.new_http_connection(self.tracker_uri_host, conn.port, conn.is_secure) http_conn.set_debuglevel(conn.debug) diff --git a/awx/lib/site-packages/boto/handler.py b/awx/lib/site-packages/boto/handler.py index df065ccad2..e11722bf42 100644 --- a/awx/lib/site-packages/boto/handler.py +++ b/awx/lib/site-packages/boto/handler.py @@ -38,6 +38,8 @@ class XmlHandler(xml.sax.ContentHandler): def endElement(self, name): self.nodes[-1][1].endElement(name, self.current_text, self.connection) if self.nodes[-1][0] == name: + if hasattr(self.nodes[-1][1], 'endNode'): + self.nodes[-1][1].endNode(self.connection) self.nodes.pop() self.current_text = '' diff --git a/awx/lib/site-packages/boto/iam/connection.py b/awx/lib/site-packages/boto/iam/connection.py index f6fa6338d6..9cc15c6f5d 100644 --- a/awx/lib/site-packages/boto/iam/connection.py +++ b/awx/lib/site-packages/boto/iam/connection.py @@ -836,7 +836,7 @@ class IAMConnection(AWSQueryConnection): :param user_name: The username of the user :type serial_number: string - :param seriasl_number: The serial number which uniquely identifies + :param serial_number: The serial number which uniquely identifies the MFA device. :type auth_code_1: string @@ -862,7 +862,7 @@ class IAMConnection(AWSQueryConnection): :param user_name: The username of the user :type serial_number: string - :param seriasl_number: The serial number which uniquely identifies + :param serial_number: The serial number which uniquely identifies the MFA device. """ @@ -879,7 +879,7 @@ class IAMConnection(AWSQueryConnection): :param user_name: The username of the user :type serial_number: string - :param seriasl_number: The serial number which uniquely identifies + :param serial_number: The serial number which uniquely identifies the MFA device. 
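# Illustrative sketch (not part of the patch): with the endNode hook added
# to XmlHandler above, a node on the parse stack can finalize itself when
# its closing tag is reached (the object below is hypothetical):
class ExampleNode(object):
    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        setattr(self, name, value)

    def endNode(self, connection):
        # called by XmlHandler just before this node is popped
        self.complete = True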
:type auth_code_1: string diff --git a/awx/lib/site-packages/boto/manage/cmdshell.py b/awx/lib/site-packages/boto/manage/cmdshell.py index 9ee013361c..0da1c7a3e1 100644 --- a/awx/lib/site-packages/boto/manage/cmdshell.py +++ b/awx/lib/site-packages/boto/manage/cmdshell.py @@ -34,10 +34,11 @@ class SSHClient(object): def __init__(self, server, host_key_file='~/.ssh/known_hosts', - uname='root', ssh_pwd=None): + uname='root', timeout=None, ssh_pwd=None): self.server = server self.host_key_file = host_key_file self.uname = uname + self._timeout = timeout self._pkey = paramiko.RSAKey.from_private_key_file(server.ssh_key_file, password=ssh_pwd) self._ssh_client = paramiko.SSHClient() @@ -52,7 +53,8 @@ class SSHClient(object): try: self._ssh_client.connect(self.server.hostname, username=self.uname, - pkey=self._pkey) + pkey=self._pkey, + timeout=self._timeout) return except socket.error, (value, message): if value in (51, 61, 111): diff --git a/awx/lib/site-packages/boto/mws/connection.py b/awx/lib/site-packages/boto/mws/connection.py index db58e6d744..1c718af0fb 100644 --- a/awx/lib/site-packages/boto/mws/connection.py +++ b/awx/lib/site-packages/boto/mws/connection.py @@ -37,15 +37,16 @@ api_version_path = { 'Products': ('2011-10-01', 'SellerId', '/Products/2011-10-01'), 'Sellers': ('2011-07-01', 'SellerId', '/Sellers/2011-07-01'), 'Inbound': ('2010-10-01', 'SellerId', - '/FulfillmentInboundShipment/2010-10-01'), + '/FulfillmentInboundShipment/2010-10-01'), 'Outbound': ('2010-10-01', 'SellerId', - '/FulfillmentOutboundShipment/2010-10-01'), + '/FulfillmentOutboundShipment/2010-10-01'), 'Inventory': ('2010-10-01', 'SellerId', - '/FulfillmentInventory/2010-10-01'), + '/FulfillmentInventory/2010-10-01'), } content_md5 = lambda c: base64.encodestring(hashlib.md5(c).digest()).strip() decorated_attrs = ('action', 'response', 'section', 'quota', 'restore', 'version') +api_call_map = {} def add_attrs_from(func, to): @@ -67,7 +68,7 @@ def structured_lists(*fields): kw.pop(key) return func(self, *args, **kw) wrapper.__doc__ = "{0}\nLists: {1}".format(func.__doc__, - ', '.join(fields)) + ', '.join(fields)) return add_attrs_from(func, to=wrapper) return decorator @@ -101,7 +102,7 @@ def destructure_object(value, into={}, prefix=''): destructure_object(attr, into=into, prefix=prefix + '.' + name) elif filter(lambda x: isinstance(value, x), (list, set, tuple)): for index, element in [(prefix + '.' 
+ str(i + 1), value[i]) - for i in range(len(value))]: + for i in range(len(value))]: destructure_object(element, into=into, prefix=index) elif isinstance(value, bool): into[prefix] = str(value).lower() @@ -118,7 +119,7 @@ def structured_objects(*fields): destructure_object(kw.pop(field), into=kw, prefix=field) return func(*args, **kw) wrapper.__doc__ = "{0}\nObjects: {1}".format(func.__doc__, - ', '.join(fields)) + ', '.join(fields)) return add_attrs_from(func, to=wrapper) return decorator @@ -137,7 +138,7 @@ def requires(*groups): return func(*args, **kw) message = ' OR '.join(['+'.join(g) for g in groups]) wrapper.__doc__ = "{0}\nRequired: {1}".format(func.__doc__, - message) + message) return add_attrs_from(func, to=wrapper) return decorator @@ -156,7 +157,7 @@ def exclusive(*groups): return func(*args, **kw) message = ' OR '.join(['+'.join(g) for g in groups]) wrapper.__doc__ = "{0}\nEither: {1}".format(func.__doc__, - message) + message) return add_attrs_from(func, to=wrapper) return decorator @@ -175,8 +176,8 @@ def dependent(field, *groups): return func(*args, **kw) message = ' OR '.join(['+'.join(g) for g in groups]) wrapper.__doc__ = "{0}\n{1} requires: {2}".format(func.__doc__, - field, - message) + field, + message) return add_attrs_from(func, to=wrapper) return decorator @@ -192,7 +193,7 @@ def requires_some_of(*fields): raise KeyError(message) return func(*args, **kw) wrapper.__doc__ = "{0}\nSome Required: {1}".format(func.__doc__, - ', '.join(fields)) + ', '.join(fields)) return add_attrs_from(func, to=wrapper) return decorator @@ -206,7 +207,7 @@ def boolean_arguments(*fields): kw[field] = str(kw[field]).lower() return func(*args, **kw) wrapper.__doc__ = "{0}\nBooleans: {1}".format(func.__doc__, - ', '.join(fields)) + ', '.join(fields)) return add_attrs_from(func, to=wrapper) return decorator @@ -237,6 +238,7 @@ def api_action(section, quota, restore, *api): wrapper.__doc__ = "MWS {0}/{1} API call; quota={2} restore={3:.2f}\n" \ "{4}".format(action, version, quota, restore, func.__doc__) + api_call_map[action] = func.func_name return wrapper return decorator @@ -260,7 +262,8 @@ class MWSConnection(AWSQueryConnection): Modelled off of the inherited get_object/make_request flow. """ request = self.build_base_http_request('POST', path, None, data=body, - params=params, headers=headers, host=self.server_name()) + params=params, headers=headers, + host=self.host) response = self._mexe(request, override_num_retries=None) body = response.read() boto.log.debug(body) @@ -275,6 +278,9 @@ class MWSConnection(AWSQueryConnection): digest = response.getheader('Content-MD5') assert content_md5(body) == digest return body + return self._parse_response(cls, body) + + def _parse_response(self, cls, body): obj = cls(self) h = XmlHandler(obj, self) xml.sax.parseString(body, h) @@ -285,13 +291,10 @@ class MWSConnection(AWSQueryConnection): The named method can be in CamelCase or underlined_lower_case. 
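# Illustrative sketch (not part of the patch): api_call_map turns method_for
# into a dictionary lookup. A standalone restatement of the same dispatch:
call_map = {'ListOrders': 'list_orders'}     # populated by @api_action

def method_for(obj, action):
    # O(1) lookup instead of the old filter() scan over dir(self)
    name = call_map.get(action)
    return getattr(obj, name) if name else None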
This is the complement to MWSConnection.any_call.action """ - # this looks ridiculous but it should be better than regex action = '_' in name and string.capwords(name, '_') or name - attribs = [getattr(self, m) for m in dir(self)] - ismethod = lambda m: type(m) is type(self.method_for) - ismatch = lambda m: getattr(m, 'action', None) == action - method = filter(ismatch, filter(ismethod, attribs)) - return method and method[0] or None + if action in api_call_map: + return getattr(self, api_call_map[action]) + return None def iter_call(self, call, *args, **kw): """Pass a call name as the first argument and a generator @@ -322,7 +325,7 @@ class MWSConnection(AWSQueryConnection): """Uploads a feed for processing by Amazon MWS. """ return self.post_request(path, kw, response, body=body, - headers=headers) + headers=headers) @structured_lists('FeedSubmissionIdList.Id', 'FeedTypeList.Type', 'FeedProcessingStatusList.Status') @@ -365,10 +368,10 @@ class MWSConnection(AWSQueryConnection): def get_service_status(self, **kw): """Instruct the user on how to get service status. """ + sections = ', '.join(map(str.lower, api_version_path.keys())) message = "Use {0}.get_(section)_service_status(), " \ "where (section) is one of the following: " \ - "{1}".format(self.__class__.__name__, - ', '.join(map(str.lower, api_version_path.keys()))) + "{1}".format(self.__class__.__name__, sections) raise AttributeError(message) @structured_lists('MarketplaceIdList.Id') @@ -583,6 +586,14 @@ class MWSConnection(AWSQueryConnection): """ return self.post_request(path, kw, response) + @requires(['PackageNumber']) + @api_action('Outbound', 30, 0.5) + def get_package_tracking_details(self, path, response, **kw): + """Returns delivery tracking information for a package in + an outbound shipment for a Multi-Channel Fulfillment order. + """ + return self.post_request(path, kw, response) + @structured_objects('Address', 'Items') @requires(['Address', 'Items']) @api_action('Outbound', 30, 0.5) @@ -659,8 +670,8 @@ class MWSConnection(AWSQueryConnection): frame that you specify. 
""" toggle = set(('FulfillmentChannel.Channel.1', - 'OrderStatus.Status.1', 'PaymentMethod.1', - 'LastUpdatedAfter', 'LastUpdatedBefore')) + 'OrderStatus.Status.1', 'PaymentMethod.1', + 'LastUpdatedAfter', 'LastUpdatedBefore')) for do, dont in { 'BuyerEmail': toggle.union(['SellerOrderId']), 'SellerOrderId': toggle.union(['BuyerEmail']), @@ -804,7 +815,7 @@ class MWSConnection(AWSQueryConnection): @requires(['NextToken']) @api_action('Sellers', 15, 60) def list_marketplace_participations_by_next_token(self, path, response, - **kw): + **kw): """Returns the next page of marketplaces and participations using the NextToken value that was returned by your previous request to either ListMarketplaceParticipations diff --git a/awx/lib/site-packages/boto/mws/response.py b/awx/lib/site-packages/boto/mws/response.py index 06740b56df..5c8ffb7365 100644 --- a/awx/lib/site-packages/boto/mws/response.py +++ b/awx/lib/site-packages/boto/mws/response.py @@ -33,20 +33,30 @@ class ComplexType(dict): class DeclarativeType(object): def __init__(self, _hint=None, **kw): + self._value = None if _hint is not None: self._hint = _hint - else: - class JITResponse(ResponseElement): - pass - self._hint = JITResponse - for name, value in kw.items(): - setattr(self._hint, name, value) - self._value = None + return + + class JITResponse(ResponseElement): + pass + self._hint = JITResponse + self._hint.__name__ = 'JIT_{0}/{1}'.format(self.__class__.__name__, + hex(id(self._hint))[2:]) + for name, value in kw.items(): + setattr(self._hint, name, value) + + def __repr__(self): + parent = getattr(self, '_parent', None) + return '<{0}_{1}/{2}_{3}>'.format(self.__class__.__name__, + parent and parent._name or '?', + getattr(self, '_name', '?'), + hex(id(self.__class__))) def setup(self, parent, name, *args, **kw): self._parent = parent self._name = name - self._clone = self.__class__(self._hint) + self._clone = self.__class__(_hint=self._hint) self._clone._parent = parent self._clone._name = name setattr(self._parent, self._name, self._clone) @@ -58,10 +68,7 @@ class DeclarativeType(object): raise NotImplemented def teardown(self, *args, **kw): - if self._value is None: - delattr(self._parent, self._name) - else: - setattr(self._parent, self._name, self._value) + setattr(self._parent, self._name, self._value) class Element(DeclarativeType): @@ -78,11 +85,6 @@ class SimpleList(DeclarativeType): DeclarativeType.__init__(self, *args, **kw) self._value = [] - def teardown(self, *args, **kw): - if self._value == []: - self._value = None - DeclarativeType.teardown(self, *args, **kw) - def start(self, *args, **kw): return None @@ -93,35 +95,46 @@ class SimpleList(DeclarativeType): class ElementList(SimpleList): def start(self, *args, **kw): value = self._hint(parent=self._parent, **kw) - self._value += [value] - return self._value[-1] + self._value.append(value) + return value def end(self, *args, **kw): pass -class MemberList(ElementList): - def __init__(self, *args, **kw): - self._this = kw.get('this') - ElementList.__init__(self, *args, **kw) - - def start(self, attrs={}, **kw): - Class = self._this or self._parent._type_for(self._name, attrs) - if issubclass(self._hint, ResponseElement): - ListClass = ElementList +class MemberList(Element): + def __init__(self, _member=None, _hint=None, *args, **kw): + message = 'Invalid `member` specification in {0}'.format(self.__class__.__name__) + assert 'member' not in kw, message + if _member is None: + if _hint is None: + Element.__init__(self, *args, member=ElementList(**kw)) + else: + 
Element.__init__(self, _hint=_hint) else: - ListClass = SimpleList - setattr(Class, Class._member, ListClass(self._hint)) - self._value = Class(attrs=attrs, parent=self._parent, **kw) - return self._value + if _hint is None: + if issubclass(_member, DeclarativeType): + member = _member(**kw) + else: + member = ElementList(_member, **kw) + Element.__init__(self, *args, member=member) + else: + message = 'Nonsensical {0} hint {1!r}'.format(self.__class__.__name__, + _hint) + raise AssertionError(message) - def end(self, *args, **kw): - self._value = getattr(self._value, self._value._member) - ElementList.end(self, *args, **kw) + def teardown(self, *args, **kw): + if self._value is None: + self._value = [] + else: + if isinstance(self._value.member, DeclarativeType): + self._value.member = [] + self._value = self._value.member + Element.teardown(self, *args, **kw) -def ResponseFactory(action): - result = globals().get(action + 'Result', ResponseElement) +def ResponseFactory(action, force=None): + result = force or globals().get(action + 'Result', ResponseElement) class MWSResponse(Response): _name = action + 'Response' @@ -141,18 +154,17 @@ def strip_namespace(func): class ResponseElement(dict): _override = {} - _member = 'member' _name = None _namespace = None - def __init__(self, connection=None, name=None, parent=None, attrs={}): + def __init__(self, connection=None, name=None, parent=None, attrs=None): if parent is not None and self._namespace is None: self._namespace = parent._namespace if connection is not None: self._connection = connection self._name = name or self._name or self.__class__.__name__ self._declared('setup', attrs=attrs) - dict.__init__(self, attrs.copy()) + dict.__init__(self, attrs and attrs.copy() or {}) def _declared(self, op, **kw): def inherit(obj): @@ -177,7 +189,7 @@ class ResponseElement(dict): do_show = lambda pair: not pair[0].startswith('_') attrs = filter(do_show, self.__dict__.items()) name = self.__class__.__name__ - if name == 'JITResponse': + if name.startswith('JIT_'): name = '^{0}^'.format(self._name or '') elif name == 'MWSResponse': name = '^{0}^'.format(self._name or name) @@ -192,7 +204,7 @@ class ResponseElement(dict): attribute = getattr(self, name, None) if isinstance(attribute, DeclarativeType): return attribute.start(name=name, attrs=attrs, - connection=connection) + connection=connection) elif attrs.getLength(): setattr(self, name, ComplexType(attrs.copy())) else: @@ -316,7 +328,7 @@ class CreateInboundShipmentPlanResult(ResponseElement): class ListInboundShipmentsResult(ResponseElement): - ShipmentData = MemberList(Element(ShipFromAddress=Element())) + ShipmentData = MemberList(ShipFromAddress=Element()) class ListInboundShipmentsByNextTokenResult(ListInboundShipmentsResult): @@ -334,8 +346,8 @@ class ListInboundShipmentItemsByNextTokenResult(ListInboundShipmentItemsResult): class ListInventorySupplyResult(ResponseElement): InventorySupplyList = MemberList( EarliestAvailability=Element(), - SupplyDetail=MemberList(\ - EarliestAvailabileToPick=Element(), + SupplyDetail=MemberList( + EarliestAvailableToPick=Element(), LatestAvailableToPick=Element(), ) ) @@ -431,13 +443,9 @@ class FulfillmentPreviewItem(ResponseElement): class FulfillmentPreview(ResponseElement): EstimatedShippingWeight = Element(ComplexWeight) - EstimatedFees = MemberList(\ - Element(\ - Amount=Element(ComplexAmount), - ), - ) + EstimatedFees = MemberList(Amount=Element(ComplexAmount)) UnfulfillablePreviewItems = MemberList(FulfillmentPreviewItem) - FulfillmentPreviewShipments 
= MemberList(\ + FulfillmentPreviewShipments = MemberList( FulfillmentPreviewItems=MemberList(FulfillmentPreviewItem), ) @@ -448,15 +456,14 @@ class GetFulfillmentPreviewResult(ResponseElement): class FulfillmentOrder(ResponseElement): DestinationAddress = Element() - NotificationEmailList = MemberList(str) + NotificationEmailList = MemberList(SimpleList) class GetFulfillmentOrderResult(ResponseElement): FulfillmentOrder = Element(FulfillmentOrder) - FulfillmentShipment = MemberList(Element(\ - FulfillmentShipmentItem=MemberList(), - FulfillmentShipmentPackage=MemberList(), - ) + FulfillmentShipment = MemberList( + FulfillmentShipmentItem=MemberList(), + FulfillmentShipmentPackage=MemberList(), ) FulfillmentOrderItem = MemberList() @@ -469,6 +476,11 @@ class ListAllFulfillmentOrdersByNextTokenResult(ListAllFulfillmentOrdersResult): pass +class GetPackageTrackingDetailsResult(ResponseElement): + ShipToAddress = Element() + TrackingEvents = MemberList(EventAddress=Element()) + + class Image(ResponseElement): pass @@ -533,17 +545,17 @@ class Product(ResponseElement): _namespace = 'ns2' Identifiers = Element(MarketplaceASIN=Element(), SKUIdentifier=Element()) - AttributeSets = Element(\ + AttributeSets = Element( ItemAttributes=ElementList(ItemAttributes), ) - Relationships = Element(\ + Relationships = Element( VariationParent=ElementList(VariationRelationship), ) CompetitivePricing = ElementList(CompetitivePricing) - SalesRankings = Element(\ + SalesRankings = Element( SalesRank=ElementList(SalesRank), ) - LowestOfferListings = Element(\ + LowestOfferListings = Element( LowestOfferListing=ElementList(LowestOfferListing), ) @@ -569,6 +581,10 @@ class GetMatchingProductForIdResult(ListMatchingProductsResult): pass +class GetMatchingProductForIdResponse(ResponseResultList): + _ResultClass = GetMatchingProductForIdResult + + class GetCompetitivePricingForSKUResponse(ProductsBulkOperationResponse): pass @@ -607,9 +623,9 @@ class GetProductCategoriesForASINResult(GetProductCategoriesResult): class Order(ResponseElement): OrderTotal = Element(ComplexMoney) ShippingAddress = Element() - PaymentExecutionDetail = Element(\ - PaymentExecutionDetailItem=ElementList(\ - PaymentExecutionDetailItem=Element(\ + PaymentExecutionDetail = Element( + PaymentExecutionDetailItem=ElementList( + PaymentExecutionDetailItem=Element( Payment=Element(ComplexMoney) ) ) diff --git a/awx/lib/site-packages/boto/opsworks/layer1.py b/awx/lib/site-packages/boto/opsworks/layer1.py index ba147cd079..0d79a05b03 100644 --- a/awx/lib/site-packages/boto/opsworks/layer1.py +++ b/awx/lib/site-packages/boto/opsworks/layer1.py @@ -80,11 +80,51 @@ class OpsWorksConnection(AWSQueryConnection): def _required_auth_capability(self): return ['hmac-v4'] + def assign_volume(self, volume_id, instance_id=None): + """ + Assigns one of the stack's registered Amazon EBS volumes to a + specified instance. The volume must first be registered with + the stack by calling RegisterVolume. For more information, see + ``_. + + :type volume_id: string + :param volume_id: The volume ID. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'VolumeId': volume_id, } + if instance_id is not None: + params['InstanceId'] = instance_id + return self.make_request(action='AssignVolume', + body=json.dumps(params)) + + def associate_elastic_ip(self, elastic_ip, instance_id=None): + """ + Associates one of the stack's registered Elastic IP addresses + with a specified instance. 
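Stepping back to the mws/response.py hunks just above: the change there lets MemberList and Element take the nested member schema as keyword arguments instead of a hand-wrapped Element(...), and ResponseFactory(action, force=None) can now be handed a result class explicitly. A minimal sketch of a result class in the new style; the class and tag names here are hypothetical, not part of the patch:

    from boto.mws.response import Element, MemberList, ResponseElement

    class ListWidgetsResult(ResponseElement):  # hypothetical result class
        # Parses <WidgetData><member>...</member></WidgetData>, where each
        # member carries a nested <ShipFromAddress> complex element.
        WidgetData = MemberList(ShipFromAddress=Element())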
The address must first be + registered with the stack by calling RegisterElasticIp. For + more information, see ``_. + + :type elastic_ip: string + :param elastic_ip: The Elastic IP address. + + :type instance_id: string + :param instance_id: The instance ID. + + """ + params = {'ElasticIp': elastic_ip, } + if instance_id is not None: + params['InstanceId'] = instance_id + return self.make_request(action='AssociateElasticIp', + body=json.dumps(params)) + def attach_elastic_load_balancer(self, elastic_load_balancer_name, layer_id): """ - Attaches an Elastic Load Balancing instance to a specified - layer. + Attaches an Elastic Load Balancing load balancer to a + specified layer. You must create the Elastic Load Balancing instance separately, by using the Elastic Load Balancing console, API, @@ -136,8 +176,8 @@ class OpsWorksConnection(AWSQueryConnection): will be launched into this VPC, and you cannot change the ID later. + If your account supports EC2 Classic, the default value is no VPC. - + If you account does not support EC2 Classic, the default value is the - default VPC for the specified region. + + If your account does not support EC2 Classic, the default value is + the default VPC for the specified region. If the VPC ID corresponds to a default VPC and you have specified @@ -559,7 +599,8 @@ class OpsWorksConnection(AWSQueryConnection): custom_instance_profile_arn=None, custom_security_group_ids=None, packages=None, volume_configurations=None, enable_auto_healing=None, - auto_assign_elastic_ips=None, custom_recipes=None, + auto_assign_elastic_ips=None, + auto_assign_public_ips=None, custom_recipes=None, install_updates_on_boot=None): """ Creates a layer. For more information, see `How to Create a @@ -629,7 +670,13 @@ class OpsWorksConnection(AWSQueryConnection): :type auto_assign_elastic_ips: boolean :param auto_assign_elastic_ips: Whether to automatically assign an - `Elastic IP address`_ to the layer. + `Elastic IP address`_ to the layer's instances. For more + information, see `How to Edit a Layer`_. + + :type auto_assign_public_ips: boolean + :param auto_assign_public_ips: For stacks that are running in a VPC, + whether to automatically assign a public IP address to the layer's + instances. For more information, see `How to Edit a Layer`_. :type custom_recipes: dict :param custom_recipes: A `LayerCustomRecipes` object that specifies the @@ -668,6 +715,8 @@ class OpsWorksConnection(AWSQueryConnection): params['EnableAutoHealing'] = enable_auto_healing if auto_assign_elastic_ips is not None: params['AutoAssignElasticIps'] = auto_assign_elastic_ips + if auto_assign_public_ips is not None: + params['AutoAssignPublicIps'] = auto_assign_public_ips if custom_recipes is not None: params['CustomRecipes'] = custom_recipes if install_updates_on_boot is not None: @@ -700,8 +749,8 @@ class OpsWorksConnection(AWSQueryConnection): into this VPC, and you cannot change the ID later. + If your account supports EC2 Classic, the default value is no VPC. - + If you account does not support EC2 Classic, the default value is the - default VPC for the specified region. + + If your account does not support EC2 Classic, the default value is + the default VPC for the specified region. If the VPC ID corresponds to a default VPC and you have specified @@ -954,6 +1003,33 @@ class OpsWorksConnection(AWSQueryConnection): return self.make_request(action='DeleteUserProfile', body=json.dumps(params)) + def deregister_elastic_ip(self, elastic_ip): + """ + Deregisters a specified Elastic IP address. 
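Taken together with the RegisterVolume/RegisterElasticIp calls added further down, these OpsWorks additions follow a register-then-assign pattern. A minimal usage sketch of the assignment half, assuming credentials come from the standard boto configuration; the volume, address, and instance IDs are placeholders:

    from boto.opsworks.layer1 import OpsWorksConnection

    opsworks = OpsWorksConnection()
    # Attach a stack-registered EBS volume and Elastic IP to one instance.
    opsworks.assign_volume('vol-12345678', instance_id='instance-id')
    opsworks.associate_elastic_ip('203.0.113.10', instance_id='instance-id')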
The address can + then be registered by another stack. For more information, see + ``_. + + :type elastic_ip: string + :param elastic_ip: The Elastic IP address. + + """ + params = {'ElasticIp': elastic_ip, } + return self.make_request(action='DeregisterElasticIp', + body=json.dumps(params)) + + def deregister_volume(self, volume_id): + """ + Deregisters an Amazon EBS volume. The volume can then be + registered by another stack. For more information, see ``_. + + :type volume_id: string + :param volume_id: The volume ID. + + """ + params = {'VolumeId': volume_id, } + return self.make_request(action='DeregisterVolume', + body=json.dumps(params)) + def describe_apps(self, stack_id=None, app_ids=None): """ Requests a description of a specified set of apps. @@ -1047,7 +1123,7 @@ class OpsWorksConnection(AWSQueryConnection): return self.make_request(action='DescribeDeployments', body=json.dumps(params)) - def describe_elastic_ips(self, instance_id=None, ips=None): + def describe_elastic_ips(self, instance_id=None, stack_id=None, ips=None): """ Describes `Elastic IP addresses`_. @@ -1058,6 +1134,11 @@ class OpsWorksConnection(AWSQueryConnection): `DescribeElasticIps` returns a description of the Elastic IP addresses associated with the specified instance. + :type stack_id: string + :param stack_id: A stack ID. If you include this parameter, + `DescribeElasticIps` returns a description of the Elastic IP + addresses that are registered with the specified stack. + :type ips: list :param ips: An array of Elastic IP addresses to be described. If you include this parameter, `DescribeElasticIps` returns a description @@ -1068,6 +1149,8 @@ class OpsWorksConnection(AWSQueryConnection): params = {} if instance_id is not None: params['InstanceId'] = instance_id + if stack_id is not None: + params['StackId'] = stack_id if ips is not None: params['Ips'] = ips return self.make_request(action='DescribeElasticIps', @@ -1080,8 +1163,8 @@ class OpsWorksConnection(AWSQueryConnection): You must specify at least one of the parameters. :type stack_id: string - :param stack_id: A stack ID. The action describes the Elastic Load - Balancing instances for the stack. + :param stack_id: A stack ID. The action describes the stack's Elastic + Load Balancing instances. :type layer_ids: list :param layer_ids: A list of layer IDs. The action describes the Elastic @@ -1130,7 +1213,7 @@ class OpsWorksConnection(AWSQueryConnection): return self.make_request(action='DescribeInstances', body=json.dumps(params)) - def describe_layers(self, stack_id, layer_ids=None): + def describe_layers(self, stack_id=None, layer_ids=None): """ Requests a description of one or more layers in a specified stack. @@ -1146,7 +1229,9 @@ class OpsWorksConnection(AWSQueryConnection): description of every layer in the specified stack. """ - params = {'StackId': stack_id, } + params = {} + if stack_id is not None: + params['StackId'] = stack_id if layer_ids is not None: params['LayerIds'] = layer_ids return self.make_request(action='DescribeLayers', @@ -1285,8 +1370,8 @@ class OpsWorksConnection(AWSQueryConnection): return self.make_request(action='DescribeUserProfiles', body=json.dumps(params)) - def describe_volumes(self, instance_id=None, raid_array_id=None, - volume_ids=None): + def describe_volumes(self, instance_id=None, stack_id=None, + raid_array_id=None, volume_ids=None): """ Describes an instance's Amazon EBS volumes. 
@@ -1297,6 +1382,10 @@ class OpsWorksConnection(AWSQueryConnection): `DescribeVolumes` returns descriptions of the volumes associated with the specified instance. + :type stack_id: string + :param stack_id: A stack ID. The action describes the stack's + registered Amazon EBS volumes. + :type raid_array_id: string :param raid_array_id: The RAID array ID. If you use this parameter, `DescribeVolumes` returns descriptions of the volumes associated @@ -1311,6 +1400,8 @@ class OpsWorksConnection(AWSQueryConnection): params = {} if instance_id is not None: params['InstanceId'] = instance_id + if stack_id is not None: + params['StackId'] = stack_id if raid_array_id is not None: params['RaidArrayId'] = raid_array_id if volume_ids is not None: @@ -1321,7 +1412,7 @@ class OpsWorksConnection(AWSQueryConnection): def detach_elastic_load_balancer(self, elastic_load_balancer_name, layer_id): """ - Detaches a specified Elastic Load Balancing instance from it's + Detaches a specified Elastic Load Balancing instance from its layer. :type elastic_load_balancer_name: string @@ -1340,6 +1431,20 @@ class OpsWorksConnection(AWSQueryConnection): return self.make_request(action='DetachElasticLoadBalancer', body=json.dumps(params)) + def disassociate_elastic_ip(self, elastic_ip): + """ + Disassociates an Elastic IP address from its instance. The + address remains registered with the stack. For more + information, see ``_. + + :type elastic_ip: string + :param elastic_ip: The Elastic IP address. + + """ + params = {'ElasticIp': elastic_ip, } + return self.make_request(action='DisassociateElasticIp', + body=json.dumps(params)) + def get_hostname_suggestion(self, layer_id): """ Gets a generated host name for the specified layer, based on @@ -1366,6 +1471,45 @@ class OpsWorksConnection(AWSQueryConnection): return self.make_request(action='RebootInstance', body=json.dumps(params)) + def register_elastic_ip(self, elastic_ip, stack_id): + """ + Registers an Elastic IP address with a specified stack. An + address can be registered with only one stack at a time. If + the address is already registered, you must first deregister + it by calling DeregisterElasticIp. For more information, see + ``_. + + :type elastic_ip: string + :param elastic_ip: The Elastic IP address. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'ElasticIp': elastic_ip, 'StackId': stack_id, } + return self.make_request(action='RegisterElasticIp', + body=json.dumps(params)) + + def register_volume(self, stack_id, ec_2_volume_id=None): + """ + Registers an Amazon EBS volume with a specified stack. A + volume can be registered with only one stack at a time. If the + volume is already registered, you must first deregister it by + calling DeregisterVolume. For more information, see ``_. + + :type ec_2_volume_id: string + :param ec_2_volume_id: The Amazon EBS volume ID. + + :type stack_id: string + :param stack_id: The stack ID. + + """ + params = {'StackId': stack_id, } + if ec_2_volume_id is not None: + params['Ec2VolumeId'] = ec_2_volume_id + return self.make_request(action='RegisterVolume', + body=json.dumps(params)) + def set_load_based_auto_scaling(self, layer_id, enable=None, up_scaling=None, down_scaling=None): """ @@ -1511,6 +1655,19 @@ class OpsWorksConnection(AWSQueryConnection): return self.make_request(action='StopStack', body=json.dumps(params)) + def unassign_volume(self, volume_id): + """ + Unassigns an assigned Amazon EBS volume. The volume remains + registered with the stack. For more information, see ``_. 
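And the registration half of that lifecycle, plus the new stack_id filters on the describe calls. Placeholder IDs again, reusing the connection from the previous sketch:

    # Register resources with a stack before assigning them to instances.
    opsworks.register_elastic_ip('203.0.113.10', 'stack-id')
    opsworks.register_volume('stack-id', ec_2_volume_id='vol-12345678')
    # describe_elastic_ips and describe_volumes now accept stack_id, scoping
    # results to everything registered with one stack.
    eips = opsworks.describe_elastic_ips(stack_id='stack-id')
    vols = opsworks.describe_volumes(stack_id='stack-id')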
+ + :type volume_id: string + :param volume_id: The volume ID. + + """ + params = {'VolumeId': volume_id, } + return self.make_request(action='UnassignVolume', + body=json.dumps(params)) + def update_app(self, app_id, name=None, description=None, type=None, app_source=None, domains=None, enable_ssl=None, ssl_configuration=None, attributes=None): @@ -1568,6 +1725,24 @@ class OpsWorksConnection(AWSQueryConnection): return self.make_request(action='UpdateApp', body=json.dumps(params)) + def update_elastic_ip(self, elastic_ip, name=None): + """ + Updates a registered Elastic IP address's name. For more + information, see ``_. + + :type elastic_ip: string + :param elastic_ip: The address. + + :type name: string + :param name: The new name. + + """ + params = {'ElasticIp': elastic_ip, } + if name is not None: + params['Name'] = name + return self.make_request(action='UpdateElasticIp', + body=json.dumps(params)) + def update_instance(self, instance_id, layer_ids=None, instance_type=None, auto_scaling_type=None, hostname=None, os=None, ami_id=None, @@ -1673,7 +1848,8 @@ class OpsWorksConnection(AWSQueryConnection): attributes=None, custom_instance_profile_arn=None, custom_security_group_ids=None, packages=None, volume_configurations=None, enable_auto_healing=None, - auto_assign_elastic_ips=None, custom_recipes=None, + auto_assign_elastic_ips=None, + auto_assign_public_ips=None, custom_recipes=None, install_updates_on_boot=None): """ Updates a specified layer. @@ -1718,7 +1894,13 @@ class OpsWorksConnection(AWSQueryConnection): :type auto_assign_elastic_ips: boolean :param auto_assign_elastic_ips: Whether to automatically assign an - `Elastic IP address`_ to the layer. + `Elastic IP address`_ to the layer's instances. For more + information, see `How to Edit a Layer`_. + + :type auto_assign_public_ips: boolean + :param auto_assign_public_ips: For stacks that are running in a VPC, + whether to automatically assign a public IP address to the layer's + instances. For more information, see `How to Edit a Layer`_. :type custom_recipes: dict :param custom_recipes: A `LayerCustomRecipes` object that specifies the @@ -1756,6 +1938,8 @@ class OpsWorksConnection(AWSQueryConnection): params['EnableAutoHealing'] = enable_auto_healing if auto_assign_elastic_ips is not None: params['AutoAssignElasticIps'] = auto_assign_elastic_ips + if auto_assign_public_ips is not None: + params['AutoAssignPublicIps'] = auto_assign_public_ips if custom_recipes is not None: params['CustomRecipes'] = custom_recipes if install_updates_on_boot is not None: @@ -1934,6 +2118,29 @@ class OpsWorksConnection(AWSQueryConnection): return self.make_request(action='UpdateUserProfile', body=json.dumps(params)) + def update_volume(self, volume_id, name=None, mount_point=None): + """ + Updates an Amazon EBS volume's name or mount point. For more + information, see ``_. + + :type volume_id: string + :param volume_id: The volume ID. + + :type name: string + :param name: The new name. + + :type mount_point: string + :param mount_point: The new mount point. 
+ + """ + params = {'VolumeId': volume_id, } + if name is not None: + params['Name'] = name + if mount_point is not None: + params['MountPoint'] = mount_point + return self.make_request(action='UpdateVolume', + body=json.dumps(params)) + def make_request(self, action, body): headers = { 'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action), diff --git a/awx/lib/site-packages/boto/redshift/__init__.py b/awx/lib/site-packages/boto/redshift/__init__.py index fca2a790da..1019e895a5 100644 --- a/awx/lib/site-packages/boto/redshift/__init__.py +++ b/awx/lib/site-packages/boto/redshift/__init__.py @@ -45,6 +45,12 @@ def regions(): RegionInfo(name='ap-northeast-1', endpoint='redshift.ap-northeast-1.amazonaws.com', connection_cls=cls), + RegionInfo(name='ap-southeast-1', + endpoint='redshift.ap-southeast-1.amazonaws.com', + connection_cls=cls), + RegionInfo(name='ap-southeast-2', + endpoint='redshift.ap-southeast-2.amazonaws.com', + connection_cls=cls), ] diff --git a/awx/lib/site-packages/boto/redshift/exceptions.py b/awx/lib/site-packages/boto/redshift/exceptions.py index b4f60dd804..0457dcd17d 100644 --- a/awx/lib/site-packages/boto/redshift/exceptions.py +++ b/awx/lib/site-packages/boto/redshift/exceptions.py @@ -188,3 +188,272 @@ class AccessToSnapshotDeniedFault(JSONResponseError): class UnauthorizedOperationFault(JSONResponseError): pass + + +class SnapshotCopyAlreadyDisabled(JSONResponseError): + pass + + +class ClusterNotFound(JSONResponseError): + pass + + +class UnknownSnapshotCopyRegion(JSONResponseError): + pass + + +class InvalidClusterSubnetState(JSONResponseError): + pass + + +class ReservedNodeQuotaExceeded(JSONResponseError): + pass + + +class InvalidClusterState(JSONResponseError): + pass + + +class HsmClientCertificateQuotaExceeded(JSONResponseError): + pass + + +class SubscriptionCategoryNotFound(JSONResponseError): + pass + + +class HsmClientCertificateNotFound(JSONResponseError): + pass + + +class SubscriptionEventIdNotFound(JSONResponseError): + pass + + +class ClusterSecurityGroupAlreadyExists(JSONResponseError): + pass + + +class HsmConfigurationAlreadyExists(JSONResponseError): + pass + + +class NumberOfNodesQuotaExceeded(JSONResponseError): + pass + + +class ReservedNodeOfferingNotFound(JSONResponseError): + pass + + +class BucketNotFound(JSONResponseError): + pass + + +class InsufficientClusterCapacity(JSONResponseError): + pass + + +class InvalidRestore(JSONResponseError): + pass + + +class UnauthorizedOperation(JSONResponseError): + pass + + +class ClusterQuotaExceeded(JSONResponseError): + pass + + +class InvalidVPCNetworkState(JSONResponseError): + pass + + +class ClusterSnapshotNotFound(JSONResponseError): + pass + + +class AuthorizationQuotaExceeded(JSONResponseError): + pass + + +class InvalidHsmClientCertificateState(JSONResponseError): + pass + + +class SNSTopicArnNotFound(JSONResponseError): + pass + + +class ResizeNotFound(JSONResponseError): + pass + + +class ClusterSubnetGroupNotFound(JSONResponseError): + pass + + +class SNSNoAuthorization(JSONResponseError): + pass + + +class ClusterSnapshotQuotaExceeded(JSONResponseError): + pass + + +class AccessToSnapshotDenied(JSONResponseError): + pass + + +class InvalidClusterSecurityGroupState(JSONResponseError): + pass + + +class NumberOfNodesPerClusterLimitExceeded(JSONResponseError): + pass + + +class ClusterSubnetQuotaExceeded(JSONResponseError): + pass + + +class SNSInvalidTopic(JSONResponseError): + pass + + +class ClusterSecurityGroupNotFound(JSONResponseError): + pass + + +class 
InvalidElasticIp(JSONResponseError): + pass + + +class InvalidClusterParameterGroupState(JSONResponseError): + pass + + +class InvalidHsmConfigurationState(JSONResponseError): + pass + + + +class ClusterAlreadyExists(JSONResponseError): + pass + + +class HsmConfigurationQuotaExceeded(JSONResponseError): + pass + + +class ClusterSnapshotAlreadyExists(JSONResponseError): + pass + + +class SubscriptionSeverityNotFound(JSONResponseError): + pass + + +class SourceNotFound(JSONResponseError): + pass + + +class ReservedNodeAlreadyExists(JSONResponseError): + pass + + +class ClusterSubnetGroupQuotaExceeded(JSONResponseError): + pass + + +class ClusterParameterGroupNotFound(JSONResponseError): + pass + + +class InvalidS3BucketName(JSONResponseError): + pass + + +class InvalidS3KeyPrefix(JSONResponseError): + pass + + +class SubscriptionAlreadyExist(JSONResponseError): + pass + + +class HsmConfigurationNotFound(JSONResponseError): + pass + + +class AuthorizationNotFound(JSONResponseError): + pass + + +class ClusterSecurityGroupQuotaExceeded(JSONResponseError): + pass + + +class EventSubscriptionQuotaExceeded(JSONResponseError): + pass + + +class AuthorizationAlreadyExists(JSONResponseError): + pass + + +class InvalidClusterSnapshotState(JSONResponseError): + pass + + +class ClusterParameterGroupQuotaExceeded(JSONResponseError): + pass + + +class SnapshotCopyDisabled(JSONResponseError): + pass + + +class ClusterSubnetGroupAlreadyExists(JSONResponseError): + pass + + +class ReservedNodeNotFound(JSONResponseError): + pass + + +class HsmClientCertificateAlreadyExists(JSONResponseError): + pass + + +class InvalidClusterSubnetGroupState(JSONResponseError): + pass + + +class SubscriptionNotFound(JSONResponseError): + pass + + +class InsufficientS3BucketPolicy(JSONResponseError): + pass + + +class ClusterParameterGroupAlreadyExists(JSONResponseError): + pass + + +class UnsupportedOption(JSONResponseError): + pass + + +class CopyToRegionDisabled(JSONResponseError): + pass + + +class SnapshotCopyAlreadyEnabled(JSONResponseError): + pass + + +class IncompatibleOrderableOptions(JSONResponseError): + pass diff --git a/awx/lib/site-packages/boto/redshift/layer1.py b/awx/lib/site-packages/boto/redshift/layer1.py index 6ba3fd3d00..2f5a332a66 100644 --- a/awx/lib/site-packages/boto/redshift/layer1.py +++ b/awx/lib/site-packages/boto/redshift/layer1.py @@ -31,56 +31,31 @@ from boto.redshift import exceptions class RedshiftConnection(AWSQueryConnection): """ Amazon Redshift **Overview** - This is the Amazon Redshift API Reference. This guide provides - descriptions and samples of the Amazon Redshift API. + This is an interface reference for Amazon Redshift. It contains + documentation for one of the programming or command line + interfaces you can use to manage Amazon Redshift clusters. Note + that Amazon Redshift is asynchronous, which means that some + interfaces may require techniques, such as polling or asynchronous + callback handlers, to determine when a command has been applied. + In this reference, the parameter descriptions indicate whether a + change is applied immediately, on the next instance reboot, or + during the next maintenance window. For a summary of the Amazon + Redshift cluster management interfaces, go to `Using the Amazon + Redshift Management Interfaces `_. Amazon Redshift manages all the work of setting up, operating, and scaling a data warehouse: provisioning capacity, monitoring and backing up the cluster, and applying patches and upgrades to the Amazon Redshift engine. 
You can focus on using your data to acquire new insights for your business and customers. - **Are You a First-Time Amazon Redshift User?** + If you are a first-time user of Amazon Redshift, we recommend that - you begin by reading the following sections: + you begin by reading the The `Amazon Redshift Getting Started + Guide`_ - - - + Service Highlights and Pricing - The `product detail page`_ - provides the Amazon Redshift value proposition, service highlights - and pricing. - + Getting Started - The `Getting Started Guide`_ includes an - example that walks you through the process of creating a cluster, - creating database tables, uploading data, and testing queries. - - - - After you complete the Getting Started Guide, we recommend that - you explore one of the following guides: - - - + Cluster Management - If you are responsible for managing Amazon - Redshift clusters, the `Cluster Management Guide`_ shows you how - to create and manage Amazon Redshift clusters. If you are an - application developer, you can use the Amazon Redshift Query API - to manage clusters programmatically. Additionally, the AWS SDK - libraries that wrap the underlying Amazon Redshift API simplify - your programming tasks. If you prefer a more interactive way of - managing clusters, you can use the Amazon Redshift console and the - AWS command line interface (AWS CLI). For information about the - API and CLI, go to the following manuals : - - + API Reference ( this document ) - + `CLI Reference`_ - - + Amazon Redshift Database Database Developer - If you are a - database developer, the Amazon Redshift `Database Developer - Guide`_ explains how to design, build, query, and maintain the - databases that make up your data warehouse. - - - For a list of supported AWS regions where you can provision a - cluster, go to the `Regions and Endpoints`_ section in the Amazon - Web Services Glossary . + If you are a database developer, the `Amazon Redshift Database + Developer Guide`_ explains how to design, build, query, and + maintain the databases that make up your data warehouse. 
""" APIVersion = "2012-12-01" DefaultRegionName = "us-east-1" @@ -88,48 +63,75 @@ class RedshiftConnection(AWSQueryConnection): ResponseError = JSONResponseError _faults = { - "ClusterNotFound": exceptions.ClusterNotFoundFault, - "InvalidClusterSubnetState": exceptions.InvalidClusterSubnetStateFault, - "InvalidClusterParameterGroupState": exceptions.InvalidClusterParameterGroupStateFault, - "ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceededFault, - "InvalidClusterState": exceptions.InvalidClusterStateFault, - "InvalidRestore": exceptions.InvalidRestoreFault, - "ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExistsFault, - "NumberOfNodesQuotaExceeded": exceptions.NumberOfNodesQuotaExceededFault, - "ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFoundFault, - "InsufficientClusterCapacity": exceptions.InsufficientClusterCapacityFault, - "UnauthorizedOperation": exceptions.UnauthorizedOperationFault, - "ClusterQuotaExceeded": exceptions.ClusterQuotaExceededFault, - "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkStateFault, - "ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFoundFault, - "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceededFault, - "InvalidSubne": exceptions.InvalidSubnet, - "ResizeNotFound": exceptions.ResizeNotFoundFault, - "ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFoundFault, - "ClusterSnapshotQuotaExceeded": exceptions.ClusterSnapshotQuotaExceededFault, - "AccessToSnapshotDenied": exceptions.AccessToSnapshotDeniedFault, - "InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupStateFault, - "NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceededFault, - "ClusterSubnetQuotaExceeded": exceptions.ClusterSubnetQuotaExceededFault, - "UnsupportedOption": exceptions.UnsupportedOptionFault, - "ClusterSecurityGroupNotFound": exceptions.ClusterSecurityGroupNotFoundFault, - "ClusterAlreadyExists": exceptions.ClusterAlreadyExistsFault, - "ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExistsFault, - "ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExistsFault, - "ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceededFault, - "ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFoundFault, - "AuthorizationNotFound": exceptions.AuthorizationNotFoundFault, - "ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceededFault, - "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExistsFault, - "InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotStateFault, - "ClusterParameterGroupQuotaExceeded": exceptions.ClusterParameterGroupQuotaExceededFault, - "ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExistsFault, - "ReservedNodeNotFound": exceptions.ReservedNodeNotFoundFault, - "InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupStateFault, - "ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExistsFault, + "SnapshotCopyAlreadyDisabled": exceptions.SnapshotCopyAlreadyDisabled, + "ClusterNotFound": exceptions.ClusterNotFound, + "UnknownSnapshotCopyRegion": exceptions.UnknownSnapshotCopyRegion, + "InvalidClusterSubnetState": exceptions.InvalidClusterSubnetState, + "InvalidSubnet": exceptions.InvalidSubnet, + "ReservedNodeQuotaExceeded": exceptions.ReservedNodeQuotaExceeded, + "InvalidClusterState": exceptions.InvalidClusterState, + "HsmClientCertificateQuotaExceeded": 
exceptions.HsmClientCertificateQuotaExceeded, + "SubscriptionCategoryNotFound": exceptions.SubscriptionCategoryNotFound, + "HsmClientCertificateNotFound": exceptions.HsmClientCertificateNotFound, + "SubscriptionEventIdNotFound": exceptions.SubscriptionEventIdNotFound, + "ClusterSecurityGroupAlreadyExists": exceptions.ClusterSecurityGroupAlreadyExists, + "HsmConfigurationAlreadyExists": exceptions.HsmConfigurationAlreadyExists, + "NumberOfNodesQuotaExceeded": exceptions.NumberOfNodesQuotaExceeded, + "ReservedNodeOfferingNotFound": exceptions.ReservedNodeOfferingNotFound, + "BucketNotFound": exceptions.BucketNotFound, + "InsufficientClusterCapacity": exceptions.InsufficientClusterCapacity, + "InvalidRestore": exceptions.InvalidRestore, + "UnauthorizedOperation": exceptions.UnauthorizedOperation, + "ClusterQuotaExceeded": exceptions.ClusterQuotaExceeded, + "InvalidVPCNetworkState": exceptions.InvalidVPCNetworkState, + "ClusterSnapshotNotFound": exceptions.ClusterSnapshotNotFound, + "AuthorizationQuotaExceeded": exceptions.AuthorizationQuotaExceeded, + "InvalidHsmClientCertificateState": exceptions.InvalidHsmClientCertificateState, + "SNSTopicArnNotFound": exceptions.SNSTopicArnNotFound, + "ResizeNotFound": exceptions.ResizeNotFound, + "ClusterSubnetGroupNotFound": exceptions.ClusterSubnetGroupNotFound, + "SNSNoAuthorization": exceptions.SNSNoAuthorization, + "ClusterSnapshotQuotaExceeded": exceptions.ClusterSnapshotQuotaExceeded, + "AccessToSnapshotDenied": exceptions.AccessToSnapshotDenied, + "InvalidClusterSecurityGroupState": exceptions.InvalidClusterSecurityGroupState, + "NumberOfNodesPerClusterLimitExceeded": exceptions.NumberOfNodesPerClusterLimitExceeded, + "ClusterSubnetQuotaExceeded": exceptions.ClusterSubnetQuotaExceeded, + "SNSInvalidTopic": exceptions.SNSInvalidTopic, + "ClusterSecurityGroupNotFound": exceptions.ClusterSecurityGroupNotFound, + "InvalidElasticIp": exceptions.InvalidElasticIp, + "InvalidClusterParameterGroupState": exceptions.InvalidClusterParameterGroupState, + "InvalidHsmConfigurationState": exceptions.InvalidHsmConfigurationState, + "ClusterAlreadyExists": exceptions.ClusterAlreadyExists, + "HsmConfigurationQuotaExceeded": exceptions.HsmConfigurationQuotaExceeded, + "ClusterSnapshotAlreadyExists": exceptions.ClusterSnapshotAlreadyExists, + "SubscriptionSeverityNotFound": exceptions.SubscriptionSeverityNotFound, + "SourceNotFound": exceptions.SourceNotFound, + "ReservedNodeAlreadyExists": exceptions.ReservedNodeAlreadyExists, + "ClusterSubnetGroupQuotaExceeded": exceptions.ClusterSubnetGroupQuotaExceeded, + "ClusterParameterGroupNotFound": exceptions.ClusterParameterGroupNotFound, + "InvalidS3BucketName": exceptions.InvalidS3BucketName, + "InvalidS3KeyPrefix": exceptions.InvalidS3KeyPrefix, + "SubscriptionAlreadyExist": exceptions.SubscriptionAlreadyExist, + "HsmConfigurationNotFound": exceptions.HsmConfigurationNotFound, + "AuthorizationNotFound": exceptions.AuthorizationNotFound, + "ClusterSecurityGroupQuotaExceeded": exceptions.ClusterSecurityGroupQuotaExceeded, "SubnetAlreadyInUse": exceptions.SubnetAlreadyInUse, - "AccessToSnapshotDenied": exceptions.AccessToSnapshotDeniedFault, - "UnauthorizedOperation": exceptions.UnauthorizedOperationFault, + "EventSubscriptionQuotaExceeded": exceptions.EventSubscriptionQuotaExceeded, + "AuthorizationAlreadyExists": exceptions.AuthorizationAlreadyExists, + "InvalidClusterSnapshotState": exceptions.InvalidClusterSnapshotState, + "ClusterParameterGroupQuotaExceeded": exceptions.ClusterParameterGroupQuotaExceeded, + 
"SnapshotCopyDisabled": exceptions.SnapshotCopyDisabled, + "ClusterSubnetGroupAlreadyExists": exceptions.ClusterSubnetGroupAlreadyExists, + "ReservedNodeNotFound": exceptions.ReservedNodeNotFound, + "HsmClientCertificateAlreadyExists": exceptions.HsmClientCertificateAlreadyExists, + "InvalidClusterSubnetGroupState": exceptions.InvalidClusterSubnetGroupState, + "SubscriptionNotFound": exceptions.SubscriptionNotFound, + "InsufficientS3BucketPolicy": exceptions.InsufficientS3BucketPolicy, + "ClusterParameterGroupAlreadyExists": exceptions.ClusterParameterGroupAlreadyExists, + "UnsupportedOption": exceptions.UnsupportedOption, + "CopyToRegionDisabled": exceptions.CopyToRegionDisabled, + "SnapshotCopyAlreadyEnabled": exceptions.SnapshotCopyAlreadyEnabled, + "IncompatibleOrderableOptions": exceptions.IncompatibleOrderableOptions, } @@ -138,7 +140,8 @@ class RedshiftConnection(AWSQueryConnection): if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) - kwargs['host'] = region.endpoint + if 'host' not in kwargs: + kwargs['host'] = region.endpoint AWSQueryConnection.__init__(self, **kwargs) self.region = region @@ -218,7 +221,10 @@ class RedshiftConnection(AWSQueryConnection): is authorized to restore. :type snapshot_cluster_identifier: string - :param snapshot_cluster_identifier: + :param snapshot_cluster_identifier: The identifier of the cluster the + snapshot was created from. This parameter is required if your IAM + user has a policy containing a snapshot resource element that + specifies anything other than * for the cluster name. :type account_with_restore_access: string :param account_with_restore_access: The identifier of the AWS customer @@ -267,6 +273,15 @@ class RedshiftConnection(AWSQueryConnection): :type source_snapshot_cluster_identifier: string :param source_snapshot_cluster_identifier: + The identifier of the cluster the source snapshot was created from. + This parameter is required if your IAM user has a policy containing + a snapshot resource element that specifies anything other than * + for the cluster name. + + Constraints: + + + + Must be the identifier for a valid cluster. :type target_snapshot_identifier: string :param target_snapshot_identifier: @@ -304,7 +319,9 @@ class RedshiftConnection(AWSQueryConnection): automated_snapshot_retention_period=None, port=None, cluster_version=None, allow_version_upgrade=None, number_of_nodes=None, publicly_accessible=None, - encrypted=None): + encrypted=None, + hsm_client_certificate_identifier=None, + hsm_configuration_identifier=None, elastic_ip=None): """ Creates a new cluster. To create the cluster in virtual private cloud (VPC), you must provide cluster subnet group @@ -323,7 +340,7 @@ class RedshiftConnection(AWSQueryConnection): To create additional databases after the cluster is created, connect to the cluster with a SQL client and use SQL commands to create a database. For more information, go to `Create a Database`_ in the - Amazon Redshift Developer Guide. + Amazon Redshift Database Developer Guide. Default: `dev` @@ -334,7 +351,7 @@ class RedshiftConnection(AWSQueryConnection): + Must contain only lowercase letters. + Cannot be a word that is reserved by the service. A list of reserved words can be found in `Reserved Words`_ in the Amazon Redshift - Developer Guide. + Database Developer Guide. :type cluster_identifier: string :param cluster_identifier: A unique identifier for the cluster. 
You use @@ -382,7 +399,7 @@ class RedshiftConnection(AWSQueryConnection): + Must be 1 - 128 alphanumeric characters. + First character must be a letter. + Cannot be a reserved word. A list of reserved words can be found in - `Reserved Words`_ in the Amazon Redshift Developer Guide. + `Reserved Words`_ in the Amazon Redshift Database Developer Guide. :type master_user_password: string :param master_user_password: @@ -527,6 +544,23 @@ class RedshiftConnection(AWSQueryConnection): :param encrypted: If `True`, the data in cluster is encrypted at rest. Default: false + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: Specifies the name of the HSM + client certificate the Amazon Redshift cluster uses to retrieve the + data encryption keys stored in an HSM. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: Specifies the name of the HSM + configuration that contains the information the Amazon Redshift + cluster can use to retrieve and store keys in an HSM. + + :type elastic_ip: string + :param elastic_ip: The Elastic IP (EIP) address for the cluster. + Constraints: The cluster must be provisioned in EC2-VPC and publicly- + accessible through an Internet gateway. For more information about + provisioning clusters in EC2-VPC, go to `Supported Platforms to + Launch Your Cluster`_ in the Amazon Redshift Management Guide. + """ params = { 'ClusterIdentifier': cluster_identifier, @@ -571,6 +605,12 @@ class RedshiftConnection(AWSQueryConnection): if encrypted is not None: params['Encrypted'] = str( encrypted).lower() + if hsm_client_certificate_identifier is not None: + params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier + if hsm_configuration_identifier is not None: + params['HsmConfigurationIdentifier'] = hsm_configuration_identifier + if elastic_ip is not None: + params['ElasticIp'] = elastic_ip return self._make_request( action='CreateCluster', verb='POST', @@ -756,6 +796,203 @@ class RedshiftConnection(AWSQueryConnection): verb='POST', path='/', params=params) + def create_event_subscription(self, subscription_name, sns_topic_arn, + source_type=None, source_ids=None, + event_categories=None, severity=None, + enabled=None): + """ + Creates an Amazon Redshift event notification subscription. + This action requires an ARN (Amazon Resource Name) of an + Amazon SNS topic created by either the Amazon Redshift + console, the Amazon SNS console, or the Amazon SNS API. To + obtain an ARN with Amazon SNS, you must create a topic in + Amazon SNS and subscribe to the topic. The ARN is displayed in + the SNS console. + + You can specify the source type, and lists of Amazon Redshift + source IDs, event categories, and event severities. + Notifications will be sent for all events you want that match + those criteria. For example, you can specify source type = + cluster, source ID = my-cluster-1 and my-cluster-2, event + categories = Availability, Backup, and severity = ERROR. The + subscription will only send notifications for those ERROR + events in the Availability and Backup categories for the + specified clusters. + + If you specify both the source type and source IDs, such as + source type = cluster and source identifier = my-cluster-1, + notifications will be sent for all the cluster events for my- + cluster-1. If you specify a source type but do not specify a + source identifier, you will receive notice of the events for + the objects of that type in your AWS account.
If you do not + specify either the SourceType or the SourceIdentifier, you + will be notified of events generated from all Amazon Redshift + sources belonging to your AWS account. You must specify a + source type if you specify a source ID. + + :type subscription_name: string + :param subscription_name: + The name of the event subscription to be created. + + Constraints: + + + + Cannot be null, empty, or blank. + + Must contain from 1 to 255 alphanumeric characters or hyphens. + + First character must be a letter. + + Cannot end with a hyphen or contain two consecutive hyphens. + + :type sns_topic_arn: string + :param sns_topic_arn: The Amazon Resource Name (ARN) of the Amazon SNS + topic used to transmit the event notifications. The ARN is created + by Amazon SNS when you create a topic and subscribe to it. + + :type source_type: string + :param source_type: The type of source that will be generating the + events. For example, if you want to be notified of events generated + by a cluster, you would set this parameter to cluster. If this + value is not specified, events are returned for all Amazon Redshift + objects in your AWS account. You must specify a source type in + order to specify source IDs. + Valid values: cluster, cluster-parameter-group, cluster-security-group, + and cluster-snapshot. + + :type source_ids: list + :param source_ids: A list of one or more identifiers of Amazon Redshift + source objects. All of the objects must be of the same type as was + specified in the source type parameter. The event subscription will + return only events generated by the specified objects. If not + specified, then events are returned for all objects within the + source type specified. + Example: my-cluster-1, my-cluster-2 + + Example: my-snapshot-20131010 + + :type event_categories: list + :param event_categories: Specifies the Amazon Redshift event categories + to be published by the event notification subscription. + Values: Configuration, Management, Monitoring, Security + + :type severity: string + :param severity: Specifies the Amazon Redshift event severity to be + published by the event notification subscription. + Values: ERROR, INFO + + :type enabled: boolean + :param enabled: A Boolean value; set to `True` to activate the + subscription, set to `False` to create the subscription but not + activate it. + + """ + params = { + 'SubscriptionName': subscription_name, + 'SnsTopicArn': sns_topic_arn, + } + if source_type is not None: + params['SourceType'] = source_type + if source_ids is not None: + self.build_list_params(params, + source_ids, + 'SourceIds.member') + if event_categories is not None: + self.build_list_params(params, + event_categories, + 'EventCategories.member') + if severity is not None: + params['Severity'] = severity + if enabled is not None: + params['Enabled'] = str( + enabled).lower() + return self._make_request( + action='CreateEventSubscription', + verb='POST', + path='/', params=params) + + def create_hsm_client_certificate(self, + hsm_client_certificate_identifier): + """ + Creates an HSM client certificate that an Amazon Redshift + cluster will use to connect to the client's HSM in order to + store and retrieve the keys used to encrypt the cluster + databases. + + The command returns a public key, which you must store in the + HSM. After creating the HSM certificate, you must create an + Amazon Redshift HSM configuration that provides a cluster the + information needed to store and retrieve database encryption + keys in the HSM.
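A sketch of the example subscription described in the create_event_subscription docstring above, reusing the redshift connection from the earlier sketch; the SNS topic ARN is a placeholder and must already exist:

    redshift.create_event_subscription(
        'my-cluster-alerts',                                   # subscription_name
        'arn:aws:sns:us-east-1:123456789012:redshift-events',  # sns_topic_arn
        source_type='cluster',
        source_ids=['my-cluster-1', 'my-cluster-2'],
        event_categories=['Availability', 'Backup'],
        severity='ERROR',
        enabled=True)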
For more information, go to aLinkToHSMTopic + in the Amazon Redshift Management Guide. + + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: The identifier to be assigned + to the new HSM client certificate that the cluster will use to + connect to the HSM to retrieve the database encryption keys. + + """ + params = { + 'HsmClientCertificateIdentifier': hsm_client_certificate_identifier, + } + return self._make_request( + action='CreateHsmClientCertificate', + verb='POST', + path='/', params=params) + + def create_hsm_configuration(self, hsm_configuration_identifier, + description, hsm_ip_address, + hsm_partition_name, hsm_partition_password, + hsm_server_public_certificate): + """ + Creates an HSM configuration that contains the information + required by an Amazon Redshift cluster to store and retrieve + database encryption keys in a Hardware Storeage Module (HSM). + After creating the HSM configuration, you can specify it as a + parameter when creating a cluster. The cluster will then store + its encryption keys in the HSM. + + Before creating an HSM configuration, you must have first + created an HSM client certificate. For more information, go to + aLinkToHSMTopic in the Amazon Redshift Management Guide. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: The identifier to be assigned to + the new Amazon Redshift HSM configuration. + + :type description: string + :param description: A text description of the HSM configuration to be + created. + + :type hsm_ip_address: string + :param hsm_ip_address: The IP address that the Amazon Redshift cluster + must use to access the HSM. + + :type hsm_partition_name: string + :param hsm_partition_name: The name of the partition in the HSM where + the Amazon Redshift clusters will store their database encryption + keys. + + :type hsm_partition_password: string + :param hsm_partition_password: The password required to access the HSM + partition. + + :type hsm_server_public_certificate: string + :param hsm_server_public_certificate: The public key used to access the + HSM client certificate, which was created by calling the Amazon + Redshift create HSM certificate command. + + """ + params = { + 'HsmConfigurationIdentifier': hsm_configuration_identifier, + 'Description': description, + 'HsmIpAddress': hsm_ip_address, + 'HsmPartitionName': hsm_partition_name, + 'HsmPartitionPassword': hsm_partition_password, + 'HsmServerPublicCertificate': hsm_server_public_certificate, + } + return self._make_request( + action='CreateHsmConfiguration', + verb='POST', + path='/', params=params) + def delete_cluster(self, cluster_identifier, skip_final_cluster_snapshot=None, final_cluster_snapshot_identifier=None): @@ -885,7 +1122,11 @@ class RedshiftConnection(AWSQueryConnection): `available` state. :type snapshot_cluster_identifier: string - :param snapshot_cluster_identifier: + :param snapshot_cluster_identifier: The unique identifier of the + cluster the snapshot was created from. This parameter is required + if your IAM user has a policy containing a snapshot resource + element that specifies anything other than * for the cluster name. + Constraints: Must be the name of valid cluster. """ params = {'SnapshotIdentifier': snapshot_identifier, } @@ -913,6 +1154,56 @@ class RedshiftConnection(AWSQueryConnection): verb='POST', path='/', params=params) + def delete_event_subscription(self, subscription_name): + """ + Deletes an Amazon Redshift event notification subscription. 
+ + :type subscription_name: string + :param subscription_name: The name of the Amazon Redshift event + notification subscription to be deleted. + + """ + params = {'SubscriptionName': subscription_name, } + return self._make_request( + action='DeleteEventSubscription', + verb='POST', + path='/', params=params) + + def delete_hsm_client_certificate(self, + hsm_client_certificate_identifier): + """ + Deletes the specified HSM client certificate. + + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: The identifier of the HSM + client certificate to be deleted. + + """ + params = { + 'HsmClientCertificateIdentifier': hsm_client_certificate_identifier, + } + return self._make_request( + action='DeleteHsmClientCertificate', + verb='POST', + path='/', params=params) + + def delete_hsm_configuration(self, hsm_configuration_identifier): + """ + Deletes the specified Amazon Redshift HSM configuration. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: The identifier of the Amazon + Redshift HSM configuration to be deleted. + + """ + params = { + 'HsmConfigurationIdentifier': hsm_configuration_identifier, + } + return self._make_request( + action='DeleteHsmConfiguration', + verb='POST', + path='/', params=params) + def describe_cluster_parameter_groups(self, parameter_group_name=None, max_records=None, marker=None): """ @@ -1334,6 +1625,67 @@ class RedshiftConnection(AWSQueryConnection): verb='POST', path='/', params=params) + def describe_event_categories(self, source_type=None): + """ + Displays a list of event categories for all event source + types, or for a specified source type. For a list of the event + categories and source types, go to `Amazon Redshift Event + Notifications`_. + + :type source_type: string + :param source_type: The source type, such as cluster or parameter + group, to which the described event categories apply. + Valid values: cluster, snapshot, parameter group, and security group. + + """ + params = {} + if source_type is not None: + params['SourceType'] = source_type + return self._make_request( + action='DescribeEventCategories', + verb='POST', + path='/', params=params) + + def describe_event_subscriptions(self, subscription_name=None, + max_records=None, marker=None): + """ + Lists descriptions of all the Amazon Redshift event + notification subscriptions for a customer account. If you + specify a subscription name, lists the description for that + subscription. + + :type subscription_name: string + :param subscription_name: The name of the Amazon Redshift event + notification subscription to be described. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified MaxRecords + value, a pagination token called a marker is included in the + response so that the remaining results can be retrieved. + Default: 100 + + Constraints: minimum 20, maximum 100 + + :type marker: string + :param marker: An optional pagination token provided by a previous + DescribeOrderableClusterOptions request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by MaxRecords.
+ + """ + params = {} + if subscription_name is not None: + params['SubscriptionName'] = subscription_name + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeEventSubscriptions', + verb='POST', + path='/', params=params) + def describe_events(self, source_identifier=None, source_type=None, start_time=None, end_time=None, duration=None, max_records=None, marker=None): @@ -1436,6 +1788,110 @@ class RedshiftConnection(AWSQueryConnection): verb='POST', path='/', params=params) + def describe_hsm_client_certificates(self, + hsm_client_certificate_identifier=None, + max_records=None, marker=None): + """ + Returns information about the specified HSM client + certificate. If no certificate ID is specified, returns + information about all the HSM certificates owned by your AWS + customer account. + + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: The identifier of a specific + HSM client certificate for which you want information. If no + identifier is specified, information is returned for all HSM client + certificates associated with Amazon Redshift clusters owned by your + AWS customer account. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results may be retrieved. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional marker returned from a previous + **DescribeOrderableClusterOptions** request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords`. + + """ + params = {} + if hsm_client_certificate_identifier is not None: + params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeHsmClientCertificates', + verb='POST', + path='/', params=params) + + def describe_hsm_configurations(self, hsm_configuration_identifier=None, + max_records=None, marker=None): + """ + Returns information about the specified Amazon Redshift HSM + configuration. If no configuration ID is specified, returns + information about all the HSM configurations owned by your AWS + customer account. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: The identifier of a specific + Amazon Redshift HSM configuration to be described. If no identifier + is specified, information is returned for all HSM configurations + owned by your AWS customer account. + + :type max_records: integer + :param max_records: The maximum number of records to include in the + response. If more records exist than the specified `MaxRecords` + value, a marker is included in the response so that the remaining + results may be retrieved. + Default: `100` + + Constraints: minimum 20, maximum 100. + + :type marker: string + :param marker: An optional marker returned from a previous + **DescribeOrderableClusterOptions** request. If this parameter is + specified, the response includes only records beyond the marker, up + to the value specified by `MaxRecords`. 
+ + """ + params = {} + if hsm_configuration_identifier is not None: + params['HsmConfigurationIdentifier'] = hsm_configuration_identifier + if max_records is not None: + params['MaxRecords'] = max_records + if marker is not None: + params['Marker'] = marker + return self._make_request( + action='DescribeHsmConfigurations', + verb='POST', + path='/', params=params) + + def describe_logging_status(self, cluster_identifier): + """ + Describes whether information, such as queries and connection + attempts, is being logged for the specified Amazon Redshift + cluster. + + :type cluster_identifier: string + :param cluster_identifier: The identifier of the cluster to get the + logging status from. + Example: `examplecluster` + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='DescribeLoggingStatus', + verb='POST', + path='/', params=params) + def describe_orderable_cluster_options(self, cluster_version=None, node_type=None, max_records=None, marker=None): @@ -1607,6 +2063,132 @@ class RedshiftConnection(AWSQueryConnection): verb='POST', path='/', params=params) + def disable_logging(self, cluster_identifier): + """ + Stops logging information, such as queries and connection + attempts, for the specified Amazon Redshift cluster. + + :type cluster_identifier: string + :param cluster_identifier: The identifier of the cluster on which + logging is to be stopped. + Example: `examplecluster` + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='DisableLogging', + verb='POST', + path='/', params=params) + + def disable_snapshot_copy(self, cluster_identifier): + """ + Disables the automatic copying of snapshots from one region to + another region for a specified cluster. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the source cluster + that you want to disable copying of snapshots to a destination + region. + Constraints: Must be the valid name of an existing cluster that has + cross-region snapshot copy enabled. + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='DisableSnapshotCopy', + verb='POST', + path='/', params=params) + + def enable_logging(self, cluster_identifier, bucket_name, + s3_key_prefix=None): + """ + Starts logging information, such as queries and connection + attempts, for the specified Amazon Redshift cluster. + + :type cluster_identifier: string + :param cluster_identifier: The identifier of the cluster on which + logging is to be started. + Example: `examplecluster` + + :type bucket_name: string + :param bucket_name: + The name of an existing S3 bucket where the log files are to be stored. + + Constraints: + + + + Must be in the same region as the cluster + + The cluster must have read bucket and put object permissions + + :type s3_key_prefix: string + :param s3_key_prefix: + The prefix applied to the log file names. + + Constraints: + + + + Cannot exceed 512 characters + + Cannot contain spaces( ), double quotes ("), single quotes ('), a + backslash (\), or control characters. 
The hexadecimal codes for + invalid characters are: + + + x00 to x20 + + x22 + + x27 + + x5c + + x7f or larger + + """ + params = { + 'ClusterIdentifier': cluster_identifier, + 'BucketName': bucket_name, + } + if s3_key_prefix is not None: + params['S3KeyPrefix'] = s3_key_prefix + return self._make_request( + action='EnableLogging', + verb='POST', + path='/', params=params) + + def enable_snapshot_copy(self, cluster_identifier, destination_region, + retention_period=None): + """ + Enables the automatic copy of snapshots from one region to + another region for a specified cluster. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the source cluster + to copy snapshots from. + Constraints: Must be the valid name of an existing cluster that does + not already have cross-region snapshot copy enabled. + + :type destination_region: string + :param destination_region: The destination region that you want to copy + snapshots to. + Constraints: Must be the name of a valid region. For more information, + see `Regions and Endpoints`_ in the Amazon Web Services General + Reference. + + :type retention_period: integer + :param retention_period: The number of days to retain automated + snapshots in the destination region after they are copied from the + source region. + Default: 7. + + Constraints: Must be at least 1 and no more than 35. + + """ + params = { + 'ClusterIdentifier': cluster_identifier, + 'DestinationRegion': destination_region, + } + if retention_period is not None: + params['RetentionPeriod'] = retention_period + return self._make_request( + action='EnableSnapshotCopy', + verb='POST', + path='/', params=params) + def modify_cluster(self, cluster_identifier, cluster_type=None, node_type=None, number_of_nodes=None, cluster_security_groups=None, @@ -1615,7 +2197,9 @@ class RedshiftConnection(AWSQueryConnection): cluster_parameter_group_name=None, automated_snapshot_retention_period=None, preferred_maintenance_window=None, - cluster_version=None, allow_version_upgrade=None): + cluster_version=None, allow_version_upgrade=None, + hsm_client_certificate_identifier=None, + hsm_configuration_identifier=None): """ Modifies the settings for a cluster. For example, you can add another security or parameter group, update the preferred @@ -1782,6 +2366,16 @@ class RedshiftConnection(AWSQueryConnection): automatically to the cluster during the maintenance window. Default: `False` + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: Specifies the name of the HSM + client certificate the Amazon Redshift cluster uses to retrieve the + data encryption keys stored in an HSM. + + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: Specifies the name of the HSM + configuration that contains the information the Amazon Redshift + cluster can use to retrieve and store keys in an HSM. 
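A hedged usage sketch for the logging and cross-region snapshot-copy calls added above; the cluster, bucket, and region names are placeholders:

    conn.enable_logging('examplecluster', 'my-redshift-audit-logs',
                        s3_key_prefix='audit/')
    conn.enable_snapshot_copy('examplecluster', 'us-west-2',
                              retention_period=14)
    conn.describe_logging_status('examplecluster')  # confirm logging is on
    conn.disable_logging('examplecluster')          # and turn it back off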
+ """ params = {'ClusterIdentifier': cluster_identifier, } if cluster_type is not None: @@ -1811,6 +2405,10 @@ class RedshiftConnection(AWSQueryConnection): if allow_version_upgrade is not None: params['AllowVersionUpgrade'] = str( allow_version_upgrade).lower() + if hsm_client_certificate_identifier is not None: + params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier + if hsm_configuration_identifier is not None: + params['HsmConfigurationIdentifier'] = hsm_configuration_identifier return self._make_request( action='ModifyCluster', verb='POST', @@ -1880,6 +2478,116 @@ class RedshiftConnection(AWSQueryConnection): verb='POST', path='/', params=params) + def modify_event_subscription(self, subscription_name, + sns_topic_arn=None, source_type=None, + source_ids=None, event_categories=None, + severity=None, enabled=None): + """ + Modifies an existing Amazon Redshift event notification + subscription. + + :type subscription_name: string + :param subscription_name: The name of the modified Amazon Redshift + event notification subscription. + + :type sns_topic_arn: string + :param sns_topic_arn: The Amazon Resource Name (ARN) of the SNS topic + to be used by the event notification subscription. + + :type source_type: string + :param source_type: The type of source that will be generating the + events. For example, if you want to be notified of events generated + by a cluster, you would set this parameter to cluster. If this + value is not specified, events are returned for all Amazon Redshift + objects in your AWS account. You must specify a source type in + order to specify source IDs. + Valid values: cluster, cluster-parameter-group, cluster-security-group, + and cluster-snapshot. + + :type source_ids: list + :param source_ids: A list of one or more identifiers of Amazon Redshift + source objects. All of the objects must be of the same type as was + specified in the source type parameter. The event subscription will + return only events generated by the specified objects. If not + specified, then events are returned for all objects within the + source type specified. + Example: my-cluster-1, my-cluster-2 + + Example: my-snapshot-20131010 + + :type event_categories: list + :param event_categories: Specifies the Amazon Redshift event categories + to be published by the event notification subscription. + Values: Configuration, Management, Monitoring, Security + + :type severity: string + :param severity: Specifies the Amazon Redshift event severity to be + published by the event notification subscription. + Values: ERROR, INFO + + :type enabled: boolean + :param enabled: A Boolean value indicating if the subscription is + enabled. 
`True` indicates the subscription is enabled + + """ + params = {'SubscriptionName': subscription_name, } + if sns_topic_arn is not None: + params['SnsTopicArn'] = sns_topic_arn + if source_type is not None: + params['SourceType'] = source_type + if source_ids is not None: + self.build_list_params(params, + source_ids, + 'SourceIds.member') + if event_categories is not None: + self.build_list_params(params, + event_categories, + 'EventCategories.member') + if severity is not None: + params['Severity'] = severity + if enabled is not None: + params['Enabled'] = str( + enabled).lower() + return self._make_request( + action='ModifyEventSubscription', + verb='POST', + path='/', params=params) + + def modify_snapshot_copy_retention_period(self, cluster_identifier, + retention_period): + """ + Modifies the number of days to retain automated snapshots in + the destination region after they are copied from the source + region. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the cluster for + which you want to change the retention period for automated + snapshots that are copied to a destination region. + Constraints: Must be the valid name of an existing cluster that has + cross-region snapshot copy enabled. + + :type retention_period: integer + :param retention_period: The number of days to retain automated + snapshots in the destination region after they are copied from the + source region. + If you decrease the retention period for automated snapshots that are + copied to a destination region, Amazon Redshift will delete any + existing automated snapshots that were copied to the destination + region and that fall outside of the new retention period. + + Constraints: Must be at least 1 and no more than 35. + + """ + params = { + 'ClusterIdentifier': cluster_identifier, + 'RetentionPeriod': retention_period, + } + return self._make_request( + action='ModifySnapshotCopyRetentionPeriod', + verb='POST', + path='/', params=params) + def purchase_reserved_node_offering(self, reserved_node_offering_id, node_count=None): """ @@ -1983,7 +2691,10 @@ class RedshiftConnection(AWSQueryConnection): allow_version_upgrade=None, cluster_subnet_group_name=None, publicly_accessible=None, - owner_account=None): + owner_account=None, + hsm_client_certificate_identifier=None, + hsm_configuration_identifier=None, + elastic_ip=None): """ Creates a new cluster from a snapshot. Amazon Redshift creates the resulting cluster with the same configuration as the @@ -2023,7 +2734,10 @@ class RedshiftConnection(AWSQueryConnection): Example: `my-snapshot-id` :type snapshot_cluster_identifier: string - :param snapshot_cluster_identifier: + :param snapshot_cluster_identifier: The name of the cluster the source + snapshot was created from. This parameter is required if your IAM + user has a policy containing a snapshot resource element that + specifies anything other than * for the cluster name. :type port: integer :param port: The port number on which the cluster accepts connections. @@ -2060,6 +2774,19 @@ class RedshiftConnection(AWSQueryConnection): the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot. + :type hsm_client_certificate_identifier: string + :param hsm_client_certificate_identifier: Specifies the name of the HSM + client certificate the Amazon Redshift cluster uses to retrieve the + data encryption keys stored in an HSM. 
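For illustration, a sketch exercising the new subscription and retention-period modifiers; the subscription name, cluster identifier, and source IDs are placeholders:

    conn.modify_event_subscription(
        'my-subscription',
        source_type='cluster',
        source_ids=['my-cluster-1', 'my-cluster-2'],
        event_categories=['Monitoring', 'Security'],
        severity='ERROR',
        enabled=True)
    conn.modify_snapshot_copy_retention_period('examplecluster', 10)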
+ + :type hsm_configuration_identifier: string + :param hsm_configuration_identifier: Specifies the name of the HSM + configuration that contains the information the Amazon Redshift + cluster can use to retrieve and store keys in an HSM. + + :type elastic_ip: string + :param elastic_ip: The elastic IP (EIP) address for the cluster. + """ params = { 'ClusterIdentifier': cluster_identifier, @@ -2081,6 +2808,12 @@ class RedshiftConnection(AWSQueryConnection): publicly_accessible).lower() if owner_account is not None: params['OwnerAccount'] = owner_account + if hsm_client_certificate_identifier is not None: + params['HsmClientCertificateIdentifier'] = hsm_client_certificate_identifier + if hsm_configuration_identifier is not None: + params['HsmConfigurationIdentifier'] = hsm_configuration_identifier + if elastic_ip is not None: + params['ElasticIp'] = elastic_ip return self._make_request( action='RestoreFromClusterSnapshot', verb='POST', @@ -2155,7 +2888,10 @@ class RedshiftConnection(AWSQueryConnection): account can no longer access. :type snapshot_cluster_identifier: string - :param snapshot_cluster_identifier: + :param snapshot_cluster_identifier: The identifier of the cluster the + snapshot was created from. This parameter is required if your IAM + user has a policy containing a snapshot resource element that + specifies anything other than * for the cluster name. :type account_with_restore_access: string :param account_with_restore_access: The identifier of the AWS customer @@ -2173,6 +2909,23 @@ class RedshiftConnection(AWSQueryConnection): verb='POST', path='/', params=params) + def rotate_encryption_key(self, cluster_identifier): + """ + Rotates the encryption keys for a cluster. + + :type cluster_identifier: string + :param cluster_identifier: The unique identifier of the cluster that + you want to rotate the encryption keys for. + Constraints: Must be the name of valid cluster that has encryption + enabled. + + """ + params = {'ClusterIdentifier': cluster_identifier, } + return self._make_request( + action='RotateEncryptionKey', + verb='POST', + path='/', params=params) + def _make_request(self, action, verb, path, params): params['ContentType'] = 'JSON' response = self.make_request(action=action, verb='POST', diff --git a/awx/lib/site-packages/boto/route53/connection.py b/awx/lib/site-packages/boto/route53/connection.py index 221b29b297..8ba8cd78ea 100644 --- a/awx/lib/site-packages/boto/route53/connection.py +++ b/awx/lib/site-packages/boto/route53/connection.py @@ -18,22 +18,25 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. 
# -import xml.sax -import uuid +import exception +import random import urllib +import uuid +import xml.sax + import boto from boto.connection import AWSAuthConnection from boto import handler +import boto.jsonresponse from boto.route53.record import ResourceRecordSets from boto.route53.zone import Zone -import boto.jsonresponse -import exception + HZXML = """ @@ -43,7 +46,7 @@ HZXML = """ %(comment)s """ - + #boto.set_stream_logger('dns') @@ -60,12 +63,13 @@ class Route53Connection(AWSAuthConnection): def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, port=None, proxy=None, proxy_port=None, host=DefaultHost, debug=0, security_token=None, - validate_certs=True): + validate_certs=True, https_connection_factory=None): AWSAuthConnection.__init__(self, host, aws_access_key_id, aws_secret_access_key, True, port, proxy, proxy_port, debug=debug, security_token=security_token, - validate_certs=validate_certs) + validate_certs=validate_certs, + https_connection_factory=https_connection_factory) def _required_auth_capability(self): return ['route53'] @@ -79,7 +83,8 @@ class Route53Connection(AWSAuthConnection): pairs.append(key + '=' + urllib.quote(str(val))) path += '?' + '&'.join(pairs) return AWSAuthConnection.make_request(self, action, path, - headers, data) + headers, data, + retry_handler=self._retry_handler) # Hosted Zones @@ -118,7 +123,7 @@ class Route53Connection(AWSAuthConnection): def get_hosted_zone(self, hosted_zone_id): """ Get detailed information about a particular Hosted Zone. - + :type hosted_zone_id: str :param hosted_zone_id: The unique identifier for the Hosted Zone @@ -158,7 +163,7 @@ class Route53Connection(AWSAuthConnection): """ Create a new Hosted Zone. Returns a Python data structure with information about the newly created Hosted Zone. - + :type domain_name: str :param domain_name: The name of the domain. This should be a fully-specified domain, and should end with a final period @@ -178,7 +183,7 @@ class Route53Connection(AWSAuthConnection): use that. :type comment: str - :param comment: Any comments you want to include about the hosted + :param comment: Any comments you want to include about the hosted zone. """ @@ -204,7 +209,7 @@ class Route53Connection(AWSAuthConnection): raise exception.DNSServerError(response.status, response.reason, body) - + def delete_hosted_zone(self, hosted_zone_id): uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id) response = self.make_request('DELETE', uri) @@ -226,7 +231,7 @@ class Route53Connection(AWSAuthConnection): """ Retrieve the Resource Record Sets defined for this Hosted Zone. Returns the raw XML data returned by the Route53 call. - + :type hosted_zone_id: str :param hosted_zone_id: The unique identifier for the Hosted Zone @@ -401,3 +406,24 @@ class Route53Connection(AWSAuthConnection): if value and not value[-1] == '.': value = "%s." % value return value + + def _retry_handler(self, response, i, next_sleep): + status = None + boto.log.debug("Saw HTTP status: %s" % response.status) + + if response.status == 400: + code = response.getheader('Code') + + if code and 'PriorRequestNotComplete' in code: + # This is a case where we need to ignore a 400 error, as + # Route53 returns this. 
See + # http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html + msg = "%s, retry attempt %s" % ( + 'PriorRequestNotComplete', + i + ) + next_sleep = random.random() * (2 ** i) + i += 1 + status = (msg, i, next_sleep) + + return status diff --git a/awx/lib/site-packages/boto/s3/bucket.py b/awx/lib/site-packages/boto/s3/bucket.py index 335e9faaed..03d21e137c 100644 --- a/awx/lib/site-packages/boto/s3/bucket.py +++ b/awx/lib/site-packages/boto/s3/bucket.py @@ -63,6 +63,7 @@ class S3WebsiteEndpointTranslate: trans_region['sa-east-1'] = 's3-website-sa-east-1' trans_region['ap-northeast-1'] = 's3-website-ap-northeast-1' trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1' + trans_region['ap-southeast-2'] = 's3-website-ap-southeast-2' @classmethod def translate_region(self, reg): @@ -341,6 +342,11 @@ class Bucket(object): raise self.connection.provider.storage_response_error( response.status, response.reason, body) + def _validate_kwarg_names(self, kwargs, names): + for kwarg in kwargs: + if kwarg not in names: + raise TypeError('Invalid argument %s!' % kwarg) + def get_all_keys(self, headers=None, **params): """ A lower-level method for listing contents of a bucket. This @@ -370,6 +376,8 @@ class Bucket(object): :return: The result from S3 listing the keys requested """ + self._validate_kwarg_names(params, ['maxkeys', 'max_keys', 'prefix', + 'marker', 'delimiter']) return self._get_all([('Contents', self.key_class), ('CommonPrefixes', Prefix)], '', headers, **params) @@ -407,6 +415,9 @@ class Bucket(object): :rtype: ResultSet :return: The result from S3 listing the keys requested """ + self._validate_kwarg_names(params, ['maxkeys', 'max_keys', 'prefix', + 'key_marker', 'version_id_marker', + 'delimiter']) return self._get_all([('Version', self.key_class), ('CommonPrefixes', Prefix), ('DeleteMarker', DeleteMarker)], @@ -450,6 +461,8 @@ class Bucket(object): :return: The result from S3 listing the uploads requested """ + self._validate_kwarg_names(params, ['max_uploads', 'key_marker', + 'upload_id_marker']) return self._get_all([('Upload', MultiPartUpload), ('CommonPrefixes', Prefix)], 'uploads', headers, **params) @@ -693,7 +706,8 @@ class Bucket(object): if self.name == src_bucket_name: src_bucket = self else: - src_bucket = self.connection.get_bucket(src_bucket_name) + src_bucket = self.connection.get_bucket( + src_bucket_name, validate=False) acl = src_bucket.get_xml_acl(src_key_name) if encrypt_key: headers[provider.server_side_encryption_header] = 'AES256' @@ -1300,6 +1314,7 @@ class Bucket(object): * ErrorDocument * Key : name of object to serve when an error occurs + """ return self.get_website_configuration_with_xml(headers)[0] @@ -1320,15 +1335,24 @@ class Bucket(object): :rtype: 2-Tuple :returns: 2-tuple containing: - 1) A dictionary containing a Python representation - of the XML response. The overall structure is: - * WebsiteConfiguration - * IndexDocument - * Suffix : suffix that is appended to request that - is for a "directory" on the website endpoint - * ErrorDocument - * Key : name of object to serve when an error occurs - 2) unparsed XML describing the bucket's website configuration. + + 1) A dictionary containing a Python representation \ + of the XML response. 
The overall structure is: + + * WebsiteConfiguration + + * IndexDocument + + * Suffix : suffix that is appended to request that \ + is for a "directory" on the website endpoint + + * ErrorDocument + + * Key : name of object to serve when an error occurs + + + 2) unparsed XML describing the bucket's website configuration + """ body = self.get_website_configuration_xml(headers=headers) diff --git a/awx/lib/site-packages/boto/sns/connection.py b/awx/lib/site-packages/boto/sns/connection.py index 73f3d9e93f..701892ce86 100644 --- a/awx/lib/site-packages/boto/sns/connection.py +++ b/awx/lib/site-packages/boto/sns/connection.py @@ -264,7 +264,7 @@ class SNSConnection(AWSQueryConnection): :type protocol: string :param protocol: The protocol used to communicate with the subscriber. Current choices are: - email|email-json|http|https|sqs + email|email-json|http|https|sqs|sms :type endpoint: string :param endpoint: The location of the endpoint for @@ -274,6 +274,7 @@ class SNSConnection(AWSQueryConnection): * For http, this would be a URL beginning with http * For https, this would be a URL beginning with https * For sqs, this would be the ARN of an SQS Queue + * For sms, this would be a phone number of an SMS-enabled device """ params = {'TopicArn': topic, 'Protocol': protocol, diff --git a/awx/lib/site-packages/boto/sqs/connection.py b/awx/lib/site-packages/boto/sqs/connection.py index e076de124e..5e8d5d029d 100644 --- a/awx/lib/site-packages/boto/sqs/connection.py +++ b/awx/lib/site-packages/boto/sqs/connection.py @@ -286,8 +286,8 @@ class SQSConnection(AWSQueryConnection): :param queue: The Queue from which messages are read. :type receipt_handle: str - :param queue: The receipt handle associated with the message whose - visibility timeout will be changed. + :param receipt_handle: The receipt handle associated with the message + whose visibility timeout will be changed. :type visibility_timeout: int :param visibility_timeout: The new value of the message's visibility @@ -337,16 +337,19 @@ class SQSConnection(AWSQueryConnection): params['QueueNamePrefix'] = prefix return self.get_list('ListQueues', params, [('QueueUrl', Queue)]) - def get_queue(self, queue_name): + def get_queue(self, queue_name, owner_acct_id=None): """ Retrieves the queue with the given name, or ``None`` if no match was found. :param str queue_name: The name of the queue to retrieve. + :param str owner_acct_id: Optionally, the AWS account ID of the account that created the queue. :rtype: :py:class:`boto.sqs.queue.Queue` or ``None`` :returns: The requested queue, or ``None`` if no match was found. 
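Two hedged sketches of behavior introduced above. First, the new _validate_kwarg_names check makes misspelled listing parameters fail fast instead of being silently dropped (the bucket name is a placeholder):

    import boto

    bucket = boto.connect_s3().get_bucket('my-bucket')
    bucket.get_all_keys(prefix='logs/', max_keys=100)  # accepted
    bucket.get_all_keys(perfix='logs/')  # raises TypeError('Invalid argument perfix!')

Second, the owner_acct_id parameter allows looking up a queue created by another AWS account (the account ID and queue name are placeholders):

    import boto.sqs

    conn = boto.sqs.connect_to_region('us-east-1')
    queue = conn.get_queue('shared-queue', owner_acct_id='111122223333')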
""" params = {'QueueName': queue_name} + if owner_acct_id: + params['QueueOwnerAWSAccountId']=owner_acct_id try: return self.get_object('GetQueueUrl', params, Queue) except SQSError: diff --git a/awx/lib/site-packages/boto/sqs/message.py b/awx/lib/site-packages/boto/sqs/message.py index f0666e5601..f7aa6980f9 100644 --- a/awx/lib/site-packages/boto/sqs/message.py +++ b/awx/lib/site-packages/boto/sqs/message.py @@ -95,7 +95,7 @@ class RawMessage: def endElement(self, name, value, connection): if name == 'Body': - self.set_body(self.decode(value)) + self.set_body(value) elif name == 'MessageId': self.id = value elif name == 'ReceiptHandle': @@ -105,6 +105,9 @@ class RawMessage: else: setattr(self, name, value) + def endNode(self, connection): + self.set_body(self.decode(self.get_body())) + def encode(self, value): """Transform body object into serialized byte array format.""" return value diff --git a/awx/lib/site-packages/boto/swf/layer2.py b/awx/lib/site-packages/boto/swf/layer2.py index dd467b42a6..fbb9f33d08 100644 --- a/awx/lib/site-packages/boto/swf/layer2.py +++ b/awx/lib/site-packages/boto/swf/layer2.py @@ -188,7 +188,11 @@ class ActivityWorker(Actor): @wraps(Layer1.poll_for_activity_task) def poll(self, **kwargs): """PollForActivityTask.""" - task = self._swf.poll_for_activity_task(self.domain, self.task_list, + task_list = self.task_list + if 'task_list' in kwargs: + task_list = kwargs.get('task_list') + del kwargs['task_list'] + task = self._swf.poll_for_activity_task(self.domain, task_list, **kwargs) self.last_tasktoken = task.get('taskToken') return task @@ -211,12 +215,14 @@ class Decider(Actor): @wraps(Layer1.poll_for_decision_task) def poll(self, **kwargs): """PollForDecisionTask.""" - result = self._swf.poll_for_decision_task(self.domain, self.task_list, + task_list = self.task_list + if 'task_list' in kwargs: + task_list = kwargs.get('task_list') + del kwargs['task_list'] + decision_task = self._swf.poll_for_decision_task(self.domain, task_list, **kwargs) - # Record task token. - self.last_tasktoken = result.get('taskToken') - # Record the last event. - return result + self.last_tasktoken = decision_task.get('taskToken') + return decision_task class WorkflowType(SWFBase): diff --git a/awx/lib/site-packages/boto/vpc/__init__.py b/awx/lib/site-packages/boto/vpc/__init__.py index 24a93a7409..7b3ea9d290 100644 --- a/awx/lib/site-packages/boto/vpc/__init__.py +++ b/awx/lib/site-packages/boto/vpc/__init__.py @@ -27,6 +27,7 @@ from boto.ec2.connection import EC2Connection from boto.resultset import ResultSet from boto.vpc.vpc import VPC from boto.vpc.customergateway import CustomerGateway +from boto.vpc.networkacl import NetworkAcl from boto.vpc.routetable import RouteTable from boto.vpc.internetgateway import InternetGateway from boto.vpc.vpngateway import VpnGateway, Attachment @@ -36,6 +37,7 @@ from boto.vpc.vpnconnection import VpnConnection from boto.ec2 import RegionData from boto.regioninfo import RegionInfo + def regions(**kw_params): """ Get all available regions for the EC2 service. 
@@ -53,9 +55,8 @@ def regions(**kw_params): connection_cls=VPCConnection) regions.append(region) regions.append(RegionInfo(name='us-gov-west-1', - endpoint=RegionData[region_name], - connection_cls=VPCConnection) - ) + endpoint=RegionData[region_name], + connection_cls=VPCConnection)) return regions @@ -117,20 +118,26 @@ class VPCConnection(EC2Connection): params['DryRun'] = 'true' return self.get_list('DescribeVpcs', params, [('item', VPC)]) - def create_vpc(self, cidr_block, dry_run=False): + def create_vpc(self, cidr_block, instance_tenancy=None, dry_run=False): """ Create a new Virtual Private Cloud. :type cidr_block: str :param cidr_block: A valid CIDR block + :type instance_tenancy: str + :param instance_tenancy: The supported tenancy options for instances + launched into the VPC. Valid values are 'default' and 'dedicated'. + :type dry_run: bool :param dry_run: Set to True if the operation should not actually run. :rtype: The newly created VPC :return: A :class:`boto.vpc.vpc.VPC` object """ - params = {'CidrBlock' : cidr_block} + params = {'CidrBlock': cidr_block} + if instance_tenancy: + params['InstanceTenancy'] = instance_tenancy if dry_run: params['DryRun'] = 'true' return self.get_object('CreateVpc', params, VPC) @@ -266,7 +273,7 @@ class VPCConnection(EC2Connection): :rtype: bool :return: True if successful """ - params = { 'AssociationId': association_id } + params = {'AssociationId': association_id} if dry_run: params['DryRun'] = 'true' return self.get_status('DisassociateRouteTable', params) @@ -284,7 +291,7 @@ class VPCConnection(EC2Connection): :rtype: The newly created route table :return: A :class:`boto.vpc.routetable.RouteTable` object """ - params = { 'VpcId': vpc_id } + params = {'VpcId': vpc_id} if dry_run: params['DryRun'] = 'true' return self.get_object('CreateRouteTable', params, RouteTable) @@ -302,13 +309,96 @@ class VPCConnection(EC2Connection): :rtype: bool :return: True if successful """ - params = { 'RouteTableId': route_table_id } + params = {'RouteTableId': route_table_id} if dry_run: params['DryRun'] = 'true' return self.get_status('DeleteRouteTable', params) + def _replace_route_table_association(self, association_id, + route_table_id, dry_run=False): + """ + Helper function for replace_route_table_association and + replace_route_table_association_with_assoc. Should not be used directly. + + :type association_id: str + :param association_id: The ID of the existing association to replace. + + :type route_table_id: str + :param route_table_id: The route table to ID to be used in the + association. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: ResultSet + :return: ResultSet of Amazon resposne + """ + params = { + 'AssociationId': association_id, + 'RouteTableId': route_table_id + } + if dry_run: + params['DryRun'] = 'true' + return self.get_object('ReplaceRouteTableAssociation', params, + ResultSet) + + def replace_route_table_assocation(self, association_id, + route_table_id, dry_run=False): + """ + Replaces a route association with a new route table. This can be + used to replace the 'main' route table by using the main route + table association instead of the more common subnet type + association. + + NOTE: It may be better to use replace_route_table_association_with_assoc + instead of this function; this function does not return the new + association ID. This function is retained for backwards compatibility. 
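A hedged sketch of the new tenancy option and of the _with_assoc variant recommended in the note above; the region and all resource IDs are placeholders:

    import boto.vpc

    c = boto.vpc.connect_to_region('us-east-1')
    vpc = c.create_vpc('10.0.0.0/16', instance_tenancy='dedicated')
    new_assoc = c.replace_route_table_association_with_assoc(
        'rtbassoc-12345678', 'rtb-12345678')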
+ + + :type association_id: str + :param association_id: The ID of the existing association to replace. + + :type route_table_id: str + :param route_table_id: The route table to ID to be used in the + association. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: bool + :return: True if successful + """ + return self._replace_route_table_association( + association_id, route_table_id, dry_run=dry_run).status + + def replace_route_table_association_with_assoc(self, association_id, + route_table_id, + dry_run=False): + """ + Replaces a route association with a new route table. This can be + used to replace the 'main' route table by using the main route + table association instead of the more common subnet type + association. Returns the new association ID. + + :type association_id: str + :param association_id: The ID of the existing association to replace. + + :type route_table_id: str + :param route_table_id: The route table to ID to be used in the + association. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. + + :rtype: str + :return: New association ID + """ + return self._replace_route_table_association( + association_id, route_table_id, dry_run=dry_run).newAssociationId + def create_route(self, route_table_id, destination_cidr_block, - gateway_id=None, instance_id=None, dry_run=False): + gateway_id=None, instance_id=None, interface_id=None, + dry_run=False): """ Creates a new route in the route table within a VPC. The route's target can be either a gateway attached to the VPC or a NAT instance in the @@ -327,6 +417,9 @@ class VPCConnection(EC2Connection): :type instance_id: str :param instance_id: The ID of a NAT instance in your VPC. + :type interface_id: str + :param interface_id: Allows routing to network interface attachments. + :type dry_run: bool :param dry_run: Set to True if the operation should not actually run. @@ -342,14 +435,16 @@ class VPCConnection(EC2Connection): params['GatewayId'] = gateway_id elif instance_id is not None: params['InstanceId'] = instance_id + elif interface_id is not None: + params['NetworkInterfaceId'] = interface_id if dry_run: params['DryRun'] = 'true' return self.get_status('CreateRoute', params) def replace_route(self, route_table_id, destination_cidr_block, - gateway_id=None, instance_id=None, interface_id=None, - dry_run=False): + gateway_id=None, instance_id=None, interface_id=None, + dry_run=False): """ Replaces an existing route within a route table in a VPC. @@ -417,6 +512,271 @@ class VPCConnection(EC2Connection): params['DryRun'] = 'true' return self.get_status('DeleteRoute', params) + #Network ACLs + + def get_all_network_acls(self, network_acl_ids=None, filters=None): + """ + Retrieve information about your network acls. You can filter results + to return information only about those network acls that match your + search parameters. Otherwise, all network acls associated with your + account are returned. + + :type network_acl_ids: list + :param network_acl_ids: A list of strings with the desired network ACL + IDs. + + :type filters: list of tuples + :param filters: A list of tuples containing filters. Each tuple + consists of a filter key and a filter value. 
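Routing traffic to an elastic network interface now only requires passing interface_id; a sketch with placeholder IDs:

    c.create_route('rtb-12345678', '0.0.0.0/0',
                   interface_id='eni-12345678')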
+ + :rtype: list + :return: A list of :class:`boto.vpc.networkacl.NetworkAcl` + """ + params = {} + if network_acl_ids: + self.build_list_params(params, network_acl_ids, "NetworkAclId") + if filters: + self.build_filter_params(params, dict(filters)) + return self.get_list('DescribeNetworkAcls', params, + [('item', NetworkAcl)]) + + def associate_network_acl(self, network_acl_id, subnet_id): + """ + Associates a network acl with a specific subnet. + + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL to associate. + + :type subnet_id: str + :param subnet_id: The ID of the subnet to associate with. + + :rtype: str + :return: The ID of the association created + """ + + acl = self.get_all_network_acls(filters=[('association.subnet-id', subnet_id)])[0] + association = [ association for association in acl.associations if association.subnet_id == subnet_id ][0] + + params = { + 'AssociationId': association.id, + 'NetworkAclId': network_acl_id + } + + result = self.get_object('ReplaceNetworkAclAssociation', params, ResultSet) + return result.newAssociationId + + def disassociate_network_acl(self, subnet_id, vpc_id=None): + """ + Figures out what the default ACL is for the VPC, and associates + current network ACL with the default. + + :type subnet_id: str + :param association_id: The ID of the subnet to which the ACL belongs. + + :type vpc_id: str + :param vpc_id: The ID of the VPC to which the ACL/subnet belongs. Queries EC2 if omitted. + + :rtype: str + :return: The ID of the association created + """ + if not vpc_id: + vpc_id = self.get_all_subnets([subnet_id])[0].vpc_id + acls = self.get_all_network_acls(filters=[('vpc-id', vpc_id), ('default', 'true')]) + default_acl_id = acls[0].id + + return self.associate_network_acl(default_acl_id, subnet_id) + + def create_network_acl(self, vpc_id): + """ + Creates a new network ACL. + + :type vpc_id: str + :param vpc_id: The VPC ID to associate this network ACL with. + + :rtype: The newly created network ACL + :return: A :class:`boto.vpc.networkacl.NetworkAcl` object + """ + params = {'VpcId': vpc_id} + return self.get_object('CreateNetworkAcl', params, NetworkAcl) + + def delete_network_acl(self, network_acl_id): + """ + Delete a network ACL + + :type network_acl_id: str + :param network_acl_id: The ID of the network_acl to delete. + + :rtype: bool + :return: True if successful + """ + params = {'NetworkAclId': network_acl_id} + return self.get_status('DeleteNetworkAcl', params) + + def create_network_acl_entry(self, network_acl_id, rule_number, protocol, rule_action, + cidr_block, egress=None, icmp_code=None, icmp_type=None, + port_range_from=None, port_range_to=None): + """ + Creates a new network ACL entry in a network ACL within a VPC. + + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL for this network ACL entry. + + :type rule_number: int + :param rule_number: The rule number to assign to the entry (for example, 100). + + :type protocol: int + :param protocol: Valid values: -1 or a protocol number + (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) + + :type rule_action: str + :param rule_action: Indicates whether to allow or deny traffic that matches the rule. + + :type cidr_block: str + :param cidr_block: The CIDR range to allow or deny, in CIDR notation (for example, + 172.16.0.0/24). + + :type egress: bool + :param egress: Indicates whether this rule applies to egress traffic from the subnet (true) + or ingress traffic to the subnet (false). 
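Putting the new ACL calls together, a hedged end-to-end sketch; the VPC and subnet IDs are placeholders:

    acl = c.create_network_acl('vpc-12345678')
    c.create_network_acl_entry(acl.id, 100, -1, 'allow', '0.0.0.0/0',
                               egress=False)
    assoc_id = c.associate_network_acl(acl.id, 'subnet-12345678')
    c.disassociate_network_acl('subnet-12345678')  # revert to the default ACL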
+ + :type icmp_type: int + :param icmp_type: For the ICMP protocol, the ICMP type. You can use -1 to specify + all ICMP types. + + :type icmp_code: int + :param icmp_code: For the ICMP protocol, the ICMP code. You can use -1 to specify + all ICMP codes for the given ICMP type. + + :type port_range_from: int + :param port_range_from: The first port in the range. + + :type port_range_to: int + :param port_range_to: The last port in the range. + + + :rtype: bool + :return: True if successful + """ + params = { + 'NetworkAclId': network_acl_id, + 'RuleNumber': rule_number, + 'Protocol': protocol, + 'RuleAction': rule_action, + 'CidrBlock': cidr_block + } + + if egress is not None: + if isinstance(egress, bool): + egress = str(egress).lower() + params['Egress'] = egress + if icmp_code is not None: + params['Icmp.Code'] = icmp_code + if icmp_type is not None: + params['Icmp.Type'] = icmp_type + if port_range_from is not None: + params['PortRange.From'] = port_range_from + if port_range_to is not None: + params['PortRange.To'] = port_range_to + + return self.get_status('CreateNetworkAclEntry', params) + + def replace_network_acl_entry(self, network_acl_id, rule_number, protocol, rule_action, + cidr_block, egress=None, icmp_code=None, icmp_type=None, + port_range_from=None, port_range_to=None): + """ + Creates a new network ACL entry in a network ACL within a VPC. + + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL for the id you want to replace + + :type rule_number: int + :param rule_number: The rule number that you want to replace(for example, 100). + + :type protocol: int + :param protocol: Valid values: -1 or a protocol number + (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml) + + :type rule_action: str + :param rule_action: Indicates whether to allow or deny traffic that matches the rule. + + :type cidr_block: str + :param cidr_block: The CIDR range to allow or deny, in CIDR notation (for example, + 172.16.0.0/24). + + :type egress: bool + :param egress: Indicates whether this rule applies to egress traffic from the subnet (true) + or ingress traffic to the subnet (false). + + :type icmp_type: int + :param icmp_type: For the ICMP protocol, the ICMP type. You can use -1 to specify + all ICMP types. + + :type icmp_code: int + :param icmp_code: For the ICMP protocol, the ICMP code. You can use -1 to specify + all ICMP codes for the given ICMP type. + + :type port_range_from: int + :param port_range_from: The first port in the range. + + :type port_range_to: int + :param port_range_to: The last port in the range. + + + :rtype: bool + :return: True if successful + """ + params = { + 'NetworkAclId': network_acl_id, + 'RuleNumber': rule_number, + 'Protocol': protocol, + 'RuleAction': rule_action, + 'CidrBlock': cidr_block + } + + if egress is not None: + if isinstance(egress, bool): + egress = str(egress).lower() + params['Egress'] = egress + if icmp_code is not None: + params['Icmp.Code'] = icmp_code + if icmp_type is not None: + params['Icmp.Type'] = icmp_type + if port_range_from is not None: + params['PortRange.From'] = port_range_from + if port_range_to is not None: + params['PortRange.To'] = port_range_to + + return self.get_status('ReplaceNetworkAclEntry', params) + + def delete_network_acl_entry(self, network_acl_id, rule_number, egress=None): + """ + Deletes a network ACL entry from a network ACL within a VPC. + + :type network_acl_id: str + :param network_acl_id: The ID of the network ACL with the network ACL entry. 
+ + :type rule_number: int + :param rule_number: The rule number for the entry to delete. + + :type egress: bool + :param egress: Specifies whether the rule to delete is an egress rule (true) + or ingress rule (false). + + :rtype: bool + :return: True if successful + """ + params = { + 'NetworkAclId': network_acl_id, + 'RuleNumber': rule_number + } + + if egress is not None: + if isinstance(egress, bool): + egress = str(egress).lower() + params['Egress'] = egress + + return self.get_status('DeleteNetworkAclEntry', params) + # Internet Gateways def get_all_internet_gateways(self, internet_gateway_ids=None, @@ -476,7 +836,7 @@ class VPCConnection(EC2Connection): :rtype: Bool :return: True if successful """ - params = { 'InternetGatewayId': internet_gateway_id } + params = {'InternetGatewayId': internet_gateway_id} if dry_run: params['DryRun'] = 'true' return self.get_status('DeleteInternetGateway', params) @@ -586,7 +946,7 @@ class VPCConnection(EC2Connection): :param ip_address: Internet-routable IP address for customer's gateway. Must be a static address. - :type bgp_asn: str + :type bgp_asn: int :param bgp_asn: Customer gateway's Border Gateway Protocol (BGP) Autonomous System Number (ASN) @@ -596,9 +956,9 @@ class VPCConnection(EC2Connection): :rtype: The newly created CustomerGateway :return: A :class:`boto.vpc.customergateway.CustomerGateway` object """ - params = {'Type' : type, - 'IpAddress' : ip_address, - 'BgpAsn' : bgp_asn} + params = {'Type': type, + 'IpAddress': ip_address, + 'BgpAsn': bgp_asn} if dry_run: params['DryRun'] = 'true' return self.get_object('CreateCustomerGateway', params, CustomerGateway) @@ -677,7 +1037,7 @@ class VPCConnection(EC2Connection): :rtype: The newly created VpnGateway :return: A :class:`boto.vpc.vpngateway.VpnGateway` object """ - params = {'Type' : type} + params = {'Type': type} if availability_zone: params['AvailabilityZone'] = availability_zone if dry_run: @@ -719,11 +1079,33 @@ class VPCConnection(EC2Connection): :return: a :class:`boto.vpc.vpngateway.Attachment` """ params = {'VpnGatewayId': vpn_gateway_id, - 'VpcId' : vpc_id} + 'VpcId': vpc_id} if dry_run: params['DryRun'] = 'true' return self.get_object('AttachVpnGateway', params, Attachment) + def detach_vpn_gateway(self, vpn_gateway_id, vpc_id, dry_run=False): + """ + Detaches a VPN gateway from a VPC. + + :type vpn_gateway_id: str + :param vpn_gateway_id: The ID of the vpn_gateway to detach + + :type vpc_id: str + :param vpc_id: The ID of the VPC you want to detach the gateway from. + + :type dry_run: bool + :param dry_run: Set to True if the operation should not actually run. 
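attach_vpn_gateway already existed; detach_vpn_gateway above completes the lifecycle. A sketch with placeholder IDs:

    attachment = c.attach_vpn_gateway('vgw-12345678', 'vpc-12345678')
    # later, when tearing down:
    c.detach_vpn_gateway('vgw-12345678', 'vpc-12345678')  # True on success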
+ + :rtype: bool + :return: True if successful + """ + params = {'VpnGatewayId': vpn_gateway_id, + 'VpcId': vpc_id} + if dry_run: + params['DryRun'] = 'true' + return self.get_status('DetachVpnGateway', params) + # Subnets def get_all_subnets(self, subnet_ids=None, filters=None, dry_run=False): @@ -784,8 +1166,8 @@ class VPCConnection(EC2Connection): :rtype: The newly created Subnet :return: A :class:`boto.vpc.customergateway.Subnet` object """ - params = {'VpcId' : vpc_id, - 'CidrBlock' : cidr_block} + params = {'VpcId': vpc_id, + 'CidrBlock': cidr_block} if availability_zone: params['AvailabilityZone'] = availability_zone if dry_run: @@ -810,16 +1192,19 @@ class VPCConnection(EC2Connection): params['DryRun'] = 'true' return self.get_status('DeleteSubnet', params) - # DHCP Options - def get_all_dhcp_options(self, dhcp_options_ids=None, dry_run=False): + def get_all_dhcp_options(self, dhcp_options_ids=None, filters=None, dry_run=False): """ Retrieve information about your DhcpOptions. :type dhcp_options_ids: list :param dhcp_options_ids: A list of strings with the desired DhcpOption ID's + :type filters: list of tuples + :param filters: A list of tuples containing filters. Each tuple + consists of a filter key and a filter value. + :type dry_run: bool :param dry_run: Set to True if the operation should not actually run. @@ -829,6 +1214,8 @@ class VPCConnection(EC2Connection): params = {} if dhcp_options_ids: self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId') + if filters: + self.build_filter_params(params, dict(filters)) if dry_run: params['DryRun'] = 'true' return self.get_list('DescribeDhcpOptions', params, @@ -890,19 +1277,19 @@ class VPCConnection(EC2Connection): if domain_name: key_counter = insert_option(params, - 'domain-name', domain_name) + 'domain-name', domain_name) if domain_name_servers: key_counter = insert_option(params, - 'domain-name-servers', domain_name_servers) + 'domain-name-servers', domain_name_servers) if ntp_servers: key_counter = insert_option(params, - 'ntp-servers', ntp_servers) + 'ntp-servers', ntp_servers) if netbios_name_servers: key_counter = insert_option(params, - 'netbios-name-servers', netbios_name_servers) + 'netbios-name-servers', netbios_name_servers) if netbios_node_type: key_counter = insert_option(params, - 'netbios-node-type', netbios_node_type) + 'netbios-node-type', netbios_node_type) if dry_run: params['DryRun'] = 'true' @@ -943,7 +1330,7 @@ class VPCConnection(EC2Connection): :return: True if successful """ params = {'DhcpOptionsId': dhcp_options_id, - 'VpcId' : vpc_id} + 'VpcId': vpc_id} if dry_run: params['DryRun'] = 'true' return self.get_status('AssociateDhcpOptions', params) @@ -983,7 +1370,7 @@ class VPCConnection(EC2Connection): params = {} if vpn_connection_ids: self.build_list_params(params, vpn_connection_ids, - 'Vpn_ConnectionId') + 'VpnConnectionId') if filters: self.build_filter_params(params, dict(filters)) if dry_run: @@ -992,7 +1379,7 @@ class VPCConnection(EC2Connection): [('item', VpnConnection)]) def create_vpn_connection(self, type, customer_gateway_id, vpn_gateway_id, - dry_run=False): + static_routes_only=None, dry_run=False): """ Create a new VPN Connection. @@ -1006,15 +1393,24 @@ class VPCConnection(EC2Connection): :type vpn_gateway_id: str :param vpn_gateway_id: The ID of the VPN gateway. + :type static_routes_only: bool + :param static_routes_only: Indicates whether the VPN connection + requires static routes. 
If you are creating a VPN connection + for a device that does not support BGP, you must specify true. + :type dry_run: bool :param dry_run: Set to True if the operation should not actually run. :rtype: The newly created VpnConnection :return: A :class:`boto.vpc.vpnconnection.VpnConnection` object """ - params = {'Type' : type, - 'CustomerGatewayId' : customer_gateway_id, - 'VpnGatewayId' : vpn_gateway_id} + params = {'Type': type, + 'CustomerGatewayId': customer_gateway_id, + 'VpnGatewayId': vpn_gateway_id} + if static_routes_only is not None: + if isinstance(static_routes_only, bool): + static_routes_only = str(static_routes_only).lower() + params['Options.StaticRoutesOnly'] = static_routes_only if dry_run: params['DryRun'] = 'true' return self.get_object('CreateVpnConnection', params, VpnConnection) diff --git a/awx/lib/site-packages/boto/vpc/customergateway.py b/awx/lib/site-packages/boto/vpc/customergateway.py index 959d01fbca..76c34831a9 100644 --- a/awx/lib/site-packages/boto/vpc/customergateway.py +++ b/awx/lib/site-packages/boto/vpc/customergateway.py @@ -14,7 +14,7 @@ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. @@ -25,6 +25,7 @@ Represents a Customer Gateway from boto.ec2.ec2object import TaggedEC2Object + class CustomerGateway(TaggedEC2Object): def __init__(self, connection=None): @@ -37,7 +38,7 @@ class CustomerGateway(TaggedEC2Object): def __repr__(self): return 'CustomerGateway:%s' % self.id - + def endElement(self, name, value, connection): if name == 'customerGatewayId': self.id = value @@ -48,7 +49,6 @@ class CustomerGateway(TaggedEC2Object): elif name == 'state': self.state = value elif name == 'bgpAsn': - self.bgp_asn = value + self.bgp_asn = int(value) else: setattr(self, name, value) - diff --git a/awx/lib/site-packages/boto/vpc/networkacl.py b/awx/lib/site-packages/boto/vpc/networkacl.py new file mode 100644 index 0000000000..4771479176 --- /dev/null +++ b/awx/lib/site-packages/boto/vpc/networkacl.py @@ -0,0 +1,164 @@ +# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/ +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + +""" +Represents a Network ACL +""" + +from boto.ec2.ec2object import TaggedEC2Object +from boto.resultset import ResultSet + + +class Icmp(object): + """ + Defines the ICMP code and type. + """ + def __init__(self, connection=None): + self.code = None + self.type = None + + def __repr__(self): + return 'Icmp::code:%s, type:%s)' % ( self.code, self.type) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + + if name == 'code': + self.code = value + elif name == 'type': + self.type = value + +class NetworkAcl(TaggedEC2Object): + + def __init__(self, connection=None): + TaggedEC2Object.__init__(self, connection) + self.id = None + self.vpc_id = None + self.network_acl_entries = [] + self.associations = [] + + def __repr__(self): + return 'NetworkAcl:%s' % self.id + + def startElement(self, name, attrs, connection): + result = super(NetworkAcl, self).startElement(name, attrs, connection) + + if result is not None: + # Parent found an interested element, just return it + return result + + if name == 'entrySet': + self.network_acl_entries = ResultSet([('item', NetworkAclEntry)]) + return self.network_acl_entries + elif name == 'associationSet': + self.associations = ResultSet([('item', NetworkAclAssociation)]) + return self.associations + else: + return None + + def endElement(self, name, value, connection): + if name == 'networkAclId': + self.id = value + elif name == 'vpcId': + self.vpc_id = value + else: + setattr(self, name, value) + +class NetworkAclEntry(object): + def __init__(self, connection=None): + self.rule_number = None + self.protocol = None + self.rule_action = None + self.egress = None + self.cidr_block = None + self.port_range = PortRange() + self.icmp = Icmp() + + def __repr__(self): + return 'Acl:%s' % self.rule_number + + def startElement(self, name, attrs, connection): + + if name == 'portRange': + return self.port_range + elif name == 'icmpTypeCode': + return self.icmp + else: + return None + + def endElement(self, name, value, connection): + if name == 'cidrBlock': + self.cidr_block = value + elif name == 'egress': + self.egress = value + elif name == 'protocol': + self.protocol = value + elif name == 'ruleAction': + self.rule_action = value + elif name == 'ruleNumber': + self.rule_number = value + + +class NetworkAclAssociation(object): + def __init__(self, connection=None): + self.id = None + self.subnet_id = None + self.network_acl_id = None + + def __repr__(self): + return 'NetworkAclAssociation:%s' % self.id + + def startElement(self, name, attrs, connection): + return None + + def endElement(self, name, value, connection): + if name == 'networkAclAssociationId': + self.id = value + elif name == 'networkAclId': + self.route_table_id = value + elif name == 'subnetId': + self.subnet_id = value + +class PortRange(object): + """ + Define the port range for the ACL entry if it is tcp / udp + """ + + def __init__(self, connection=None): + self.from_port = None + self.to_port = None + + def __repr__(self): + return 'PortRange:(%s-%s)' % ( self.from_port, self.to_port) + + def startElement(self, name, attrs, connection): + pass + + def endElement(self, name, value, connection): + + if name == 'from': + self.from_port = value + elif name == 'to': + self.to_port = value + 
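Given the parser classes above, ACLs retrieved through get_all_network_acls expose their entries and associations as Python objects; a hedged sketch with a placeholder VPC ID:

    for acl in c.get_all_network_acls(filters=[('vpc-id', 'vpc-12345678')]):
        for entry in acl.network_acl_entries:
            print(entry.rule_number, entry.rule_action, entry.cidr_block)
        for assoc in acl.associations:
            print(assoc.id, assoc.subnet_id)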
+ diff --git a/awx/lib/site-packages/celery/__compat__.py b/awx/lib/site-packages/celery/__compat__.py deleted file mode 100644 index 08700719cb..0000000000 --- a/awx/lib/site-packages/celery/__compat__.py +++ /dev/null @@ -1,208 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.__compat__ - ~~~~~~~~~~~~~~~~~ - - This module contains utilities to dynamically - recreate modules, either for lazy loading or - to create old modules at runtime instead of - having them litter the source tree. - -""" -from __future__ import absolute_import - -import operator -import sys - -# import fails in python 2.5. fallback to reduce in stdlib -try: - from functools import reduce -except ImportError: - pass - -from importlib import import_module -from types import ModuleType - -from .local import Proxy - -MODULE_DEPRECATED = """ -The module %s is deprecated and will be removed in a future version. -""" - -DEFAULT_ATTRS = set(['__file__', '__path__', '__doc__', '__all__']) - -# im_func is no longer available in Py3. -# instead the unbound method itself can be used. -if sys.version_info[0] == 3: # pragma: no cover - def fun_of_method(method): - return method -else: - def fun_of_method(method): # noqa - return method.im_func - - -def getappattr(path): - """Gets attribute from the current_app recursively, - e.g. getappattr('amqp.get_task_consumer')``.""" - from celery import current_app - return current_app._rgetattr(path) - - -def _compat_task_decorator(*args, **kwargs): - from celery import current_app - kwargs.setdefault('accept_magic_kwargs', True) - return current_app.task(*args, **kwargs) - - -def _compat_periodic_task_decorator(*args, **kwargs): - from celery.task import periodic_task - kwargs.setdefault('accept_magic_kwargs', True) - return periodic_task(*args, **kwargs) - - -COMPAT_MODULES = { - 'celery': { - 'execute': { - 'send_task': 'send_task', - }, - 'decorators': { - 'task': _compat_task_decorator, - 'periodic_task': _compat_periodic_task_decorator, - }, - 'log': { - 'get_default_logger': 'log.get_default_logger', - 'setup_logger': 'log.setup_logger', - 'setup_loggig_subsystem': 'log.setup_logging_subsystem', - 'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger', - }, - 'messaging': { - 'TaskPublisher': 'amqp.TaskPublisher', - 'TaskConsumer': 'amqp.TaskConsumer', - 'establish_connection': 'connection', - 'with_connection': 'with_default_connection', - 'get_consumer_set': 'amqp.TaskConsumer', - }, - 'registry': { - 'tasks': 'tasks', - }, - }, - 'celery.task': { - 'control': { - 'broadcast': 'control.broadcast', - 'rate_limit': 'control.rate_limit', - 'time_limit': 'control.time_limit', - 'ping': 'control.ping', - 'revoke': 'control.revoke', - 'discard_all': 'control.purge', - 'inspect': 'control.inspect', - }, - 'schedules': 'celery.schedules', - 'chords': 'celery.canvas', - } -} - - -class class_property(object): - - def __init__(self, fget=None, fset=None): - assert fget and isinstance(fget, classmethod) - assert isinstance(fset, classmethod) if fset else True - self.__get = fget - self.__set = fset - - info = fget.__get__(object) # just need the info attrs. 
- self.__doc__ = info.__doc__ - self.__name__ = info.__name__ - self.__module__ = info.__module__ - - def __get__(self, obj, type=None): - if obj and type is None: - type = obj.__class__ - return self.__get.__get__(obj, type)() - - def __set__(self, obj, value): - if obj is None: - return self - return self.__set.__get__(obj)(value) - - -def reclassmethod(method): - return classmethod(fun_of_method(method)) - - -class MagicModule(ModuleType): - _compat_modules = () - _all_by_module = {} - _direct = {} - _object_origins = {} - - def __getattr__(self, name): - if name in self._object_origins: - module = __import__(self._object_origins[name], None, None, [name]) - for item in self._all_by_module[module.__name__]: - setattr(self, item, getattr(module, item)) - return getattr(module, name) - elif name in self._direct: - module = __import__(self._direct[name], None, None, [name]) - setattr(self, name, module) - return module - return ModuleType.__getattribute__(self, name) - - def __dir__(self): - return list(set(self.__all__) | DEFAULT_ATTRS) - - -def create_module(name, attrs, cls_attrs=None, pkg=None, - base=MagicModule, prepare_attr=None): - fqdn = '.'.join([pkg.__name__, name]) if pkg else name - cls_attrs = {} if cls_attrs is None else cls_attrs - - attrs = dict((attr_name, prepare_attr(attr) if prepare_attr else attr) - for attr_name, attr in attrs.iteritems()) - module = sys.modules[fqdn] = type(name, (base, ), cls_attrs)(fqdn) - module.__dict__.update(attrs) - return module - - -def recreate_module(name, compat_modules=(), by_module={}, direct={}, - base=MagicModule, **attrs): - old_module = sys.modules[name] - origins = get_origins(by_module) - compat_modules = COMPAT_MODULES.get(name, ()) - - cattrs = dict( - _compat_modules=compat_modules, - _all_by_module=by_module, _direct=direct, - _object_origins=origins, - __all__=tuple(set(reduce( - operator.add, - [tuple(v) for v in [compat_modules, origins, direct, attrs]], - ))), - ) - new_module = create_module(name, attrs, cls_attrs=cattrs, base=base) - new_module.__dict__.update(dict((mod, get_compat_module(new_module, mod)) - for mod in compat_modules)) - return old_module, new_module - - -def get_compat_module(pkg, name): - - def prepare(attr): - if isinstance(attr, basestring): - return Proxy(getappattr, (attr, )) - return attr - - attrs = COMPAT_MODULES[pkg.__name__][name] - if isinstance(attrs, basestring): - fqdn = '.'.join([pkg.__name__, name]) - module = sys.modules[fqdn] = import_module(attrs) - return module - attrs['__all__'] = list(attrs) - return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare) - - -def get_origins(defs): - origins = {} - for module, items in defs.iteritems(): - origins.update(dict((item, module) for item in items)) - return origins diff --git a/awx/lib/site-packages/celery/__init__.py b/awx/lib/site-packages/celery/__init__.py index 335047b024..5a26442f98 100644 --- a/awx/lib/site-packages/celery/__init__.py +++ b/awx/lib/site-packages/celery/__init__.py @@ -2,45 +2,126 @@ """Distributed Task Queue""" # :copyright: (c) 2009 - 2012 Ask Solem and individual contributors, # All rights reserved. -# :copyright: (c) 2012 VMware, Inc., All rights reserved. +# :copyright: (c) 2012-2013 GoPivotal, Inc., All rights reserved. # :license: BSD (3 Clause), see LICENSE for more details. 
from __future__ import absolute_import -SERIES = 'Chiastic Slide' -VERSION = (3, 0, 23) +SERIES = 'Cipater' +VERSION = (3, 1, 3) __version__ = '.'.join(str(p) for p in VERSION[0:3]) + ''.join(VERSION[3:]) __author__ = 'Ask Solem' __contact__ = 'ask@celeryproject.org' __homepage__ = 'http://celeryproject.org' __docformat__ = 'restructuredtext' __all__ = [ - 'Celery', 'bugreport', 'shared_task', 'Task', - 'current_app', 'current_task', - 'chain', 'chord', 'chunks', 'group', 'subtask', - 'xmap', 'xstarmap', 'uuid', 'VERSION', '__version__', + 'Celery', 'bugreport', 'shared_task', 'task', + 'current_app', 'current_task', 'maybe_signature', + 'chain', 'chord', 'chunks', 'group', 'signature', + 'xmap', 'xstarmap', 'uuid', 'version', '__version__', ] -VERSION_BANNER = '%s (%s)' % (__version__, SERIES) +VERSION_BANNER = '{0} ({1})'.format(__version__, SERIES) # -eof meta- +import os +import sys +if os.environ.get('C_IMPDEBUG'): # pragma: no cover + from .five import builtins + real_import = builtins.__import__ + + def debug_import(name, locals=None, globals=None, + fromlist=None, level=-1): + glob = globals or getattr(sys, 'emarfteg_'[::-1])(1).f_globals + importer_name = glob and glob.get('__name__') or 'unknown' + print('-- {0} imports {1}'.format(importer_name, name)) + return real_import(name, locals, globals, fromlist, level) + builtins.__import__ = debug_import + +# This is never executed, but tricks static analyzers (PyDev, PyCharm, +# pylint, etc.) into knowing the types of these symbols, and what +# they contain. STATICA_HACK = True globals()['kcah_acitats'[::-1].upper()] = False -if STATICA_HACK: - # This is never executed, but tricks static analyzers (PyDev, PyCharm, - # pylint, etc.) into knowing the types of these symbols, and what - # they contain. - from celery.app.base import Celery - from celery.app.utils import bugreport - from celery.app.task import Task - from celery._state import current_app, current_task - from celery.canvas import ( - chain, chord, chunks, group, subtask, xmap, xstarmap, +if STATICA_HACK: # pragma: no cover + from celery.app import shared_task # noqa + from celery.app.base import Celery # noqa + from celery.app.utils import bugreport # noqa + from celery.app.task import Task # noqa + from celery._state import current_app, current_task # noqa + from celery.canvas import ( # noqa + chain, chord, chunks, group, + signature, maybe_signature, xmap, xstarmap, subtask, ) - from celery.utils import uuid + from celery.utils import uuid # noqa + +# Eventlet/gevent patching must happen before importing +# anything else, so these tools must be at top-level. + + +def _find_option_with_arg(argv, short_opts=None, long_opts=None): + """Search argv for option specifying its short and longopt + alternatives. + + Return the value of the option if found. 
+ + """ + for i, arg in enumerate(argv): + if arg.startswith('-'): + if long_opts and arg.startswith('--'): + name, _, val = arg.partition('=') + if name in long_opts: + return val + if short_opts and arg in short_opts: + return argv[i + 1] + raise KeyError('|'.join(short_opts or [] + long_opts or [])) + + +def _patch_eventlet(): + import eventlet + import eventlet.debug + eventlet.monkey_patch() + EVENTLET_DBLOCK = int(os.environ.get('EVENTLET_NOBLOCK', 0)) + if EVENTLET_DBLOCK: + eventlet.debug.hub_blocking_detection(EVENTLET_DBLOCK) + + +def _patch_gevent(): + from gevent import monkey, version_info + monkey.patch_all() + if version_info[0] == 0: # pragma: no cover + # Signals aren't working in gevent versions <1.0, + # and are not monkey patched by patch_all() + from gevent import signal as _gevent_signal + _signal = __import__('signal') + _signal.signal = _gevent_signal + + +def maybe_patch_concurrency(argv=sys.argv, + short_opts=['-P'], long_opts=['--pool'], + patches={'eventlet': _patch_eventlet, + 'gevent': _patch_gevent}): + """With short and long opt alternatives that specify the command line + option to set the pool, this makes sure that anything that needs + to be patched is completed as early as possible. + (e.g. eventlet/gevent monkey patches).""" + try: + pool = _find_option_with_arg(argv, short_opts, long_opts) + except KeyError: + pass + else: + try: + patcher = patches[pool] + except KeyError: + pass + else: + patcher() + # set up eventlet/gevent environments ASAP. + from celery import concurrency + concurrency.get_implementation(pool) # Lazy loading -from .__compat__ import recreate_module +from .five import recreate_module old_module, new_module = recreate_module( # pragma: no cover __name__, @@ -49,7 +130,8 @@ old_module, new_module = recreate_module( # pragma: no cover 'celery.app.task': ['Task'], 'celery._state': ['current_app', 'current_task'], 'celery.canvas': ['chain', 'chord', 'chunks', 'group', - 'subtask', 'xmap', 'xstarmap'], + 'signature', 'maybe_signature', 'subtask', + 'xmap', 'xstarmap'], 'celery.utils': ['uuid'], }, direct={'task': 'celery.task'}, @@ -58,4 +140,6 @@ old_module, new_module = recreate_module( # pragma: no cover __author__=__author__, __contact__=__contact__, __homepage__=__homepage__, __docformat__=__docformat__, VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER, + maybe_patch_concurrency=maybe_patch_concurrency, + _find_option_with_arg=_find_option_with_arg, ) diff --git a/awx/lib/site-packages/celery/__main__.py b/awx/lib/site-packages/celery/__main__.py index 9f0576593d..7fc3121154 100644 --- a/awx/lib/site-packages/celery/__main__.py +++ b/awx/lib/site-packages/celery/__main__.py @@ -2,10 +2,25 @@ from __future__ import absolute_import import sys +from os.path import basename -def maybe_patch_concurrency(): - from celery.platforms import maybe_patch_concurrency - maybe_patch_concurrency(sys.argv, ['-P'], ['--pool']) +from . 
import maybe_patch_concurrency + +__all__ = ['main'] + +DEPRECATED_FMT = """ +The {old!r} command is deprecated, please use {new!r} instead: + +$ {new_argv} + +""" + + +def _warn_deprecated(new): + print(DEPRECATED_FMT.format( + old=basename(sys.argv[0]), new=new, + new_argv=' '.join([new] + sys.argv[1:])), + ) def main(): @@ -16,21 +31,24 @@ def main(): def _compat_worker(): maybe_patch_concurrency() - from celery.bin.celeryd import main + _warn_deprecated('celery worker') + from celery.bin.worker import main main() def _compat_multi(): maybe_patch_concurrency() - from celery.bin.celeryd_multi import main + _warn_deprecated('celery multi') + from celery.bin.multi import main main() def _compat_beat(): maybe_patch_concurrency() - from celery.bin.celerybeat import main + _warn_deprecated('celery beat') + from celery.bin.beat import main main() -if __name__ == '__main__': +if __name__ == '__main__': # pragma: no cover main() diff --git a/awx/lib/site-packages/celery/_state.py b/awx/lib/site-packages/celery/_state.py index 137e44e933..73367a1869 100644 --- a/awx/lib/site-packages/celery/_state.py +++ b/awx/lib/site-packages/celery/_state.py @@ -9,7 +9,7 @@ This module shouldn't be used directly. """ -from __future__ import absolute_import +from __future__ import absolute_import, print_function import os import sys @@ -19,12 +19,26 @@ import weakref from celery.local import Proxy from celery.utils.threads import LocalStack +__all__ = ['set_default_app', 'get_current_app', 'get_current_task', + 'get_current_worker_task', 'current_app', 'current_task'] + #: Global default app used when no current app. default_app = None #: List of all app instances (weakrefs), must not be used directly. _apps = set() +_task_join_will_block = False + + +def _set_task_join_will_block(blocks): + global _task_join_will_block + _task_join_will_block = True + + +def task_join_will_block(): + return _task_join_will_block + class _TLS(threading.local): #: Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute @@ -53,10 +67,11 @@ def _get_current_app(): return _tls.current_app or default_app C_STRICT_APP = os.environ.get('C_STRICT_APP') -if os.environ.get('C_STRICT_APP'): +if os.environ.get('C_STRICT_APP'): # pragma: no cover def get_current_app(): + raise Exception('USES CURRENT APP') import traceback - sys.stderr.write('USES CURRENT_APP\n') + print('-- USES CURRENT_APP', file=sys.stderr) # noqa+ traceback.print_stack(file=sys.stderr) return _get_current_app() else: diff --git a/awx/lib/site-packages/celery/app/__init__.py b/awx/lib/site-packages/celery/app/__init__.py index becee39d43..5ea07eca62 100644 --- a/awx/lib/site-packages/celery/app/__init__.py +++ b/awx/lib/site-packages/celery/app/__init__.py @@ -7,22 +7,29 @@ """ from __future__ import absolute_import -from __future__ import with_statement import os +from collections import Callable + from celery.local import Proxy from celery import _state -from celery._state import ( # noqa +from celery._state import ( set_default_app, get_current_app as current_app, get_current_task as current_task, _get_active_apps, + _task_stack, ) from celery.utils import gen_task_name from .builtins import shared_task as _shared_task -from .base import Celery, AppPickler # noqa +from .base import Celery, AppPickler + +__all__ = ['Celery', 'AppPickler', 'default_app', 'app_or_default', + 'bugreport', 'enable_trace', 'disable_trace', 'shared_task', + 'set_default_app', 'current_app', 'current_task', + 'push_current_task', 'pop_current_task'] #: Proxy always returning 
the app set as default. default_app = Proxy(lambda: _state.default_app) @@ -40,8 +47,18 @@ app_or_default = None default_loader = os.environ.get('CELERY_LOADER') or 'default' # XXX -def bugreport(): - return current_app().bugreport() +#: Function used to push a task to the thread local stack +#: keeping track of the currently executing task. +#: You must remember to pop the task after. +push_current_task = _task_stack.push + +#: Function used to pop a task from the thread local stack +#: keeping track of the currently executing task. +pop_current_task = _task_stack.pop + + +def bugreport(app=None): + return (app or current_app()).bugreport() def _app_or_default(app=None): @@ -84,8 +101,8 @@ App = Celery # XXX Compat def shared_task(*args, **kwargs): - """Task decorator that creates shared tasks, - and returns a proxy that always returns the task from the current apps + """Create shared tasks (decorator). + Will return a proxy that always takes the task from the current apps task registry. This can be used by library authors to create tasks that will work @@ -121,7 +138,7 @@ def shared_task(*args, **kwargs): with app._finalize_mutex: app._task_from_fun(fun, **options) - # Returns a proxy that always gets the task from the current + # Return a proxy that always gets the task from the current # apps task registry. def task_by_cons(): app = current_app() @@ -131,6 +148,6 @@ def shared_task(*args, **kwargs): return Proxy(task_by_cons) return __inner - if len(args) == 1 and callable(args[0]): + if len(args) == 1 and isinstance(args[0], Callable): return create_shared_task(**kwargs)(args[0]) return create_shared_task(*args, **kwargs) diff --git a/awx/lib/site-packages/celery/app/abstract.py b/awx/lib/site-packages/celery/app/abstract.py deleted file mode 100644 index 96ba1e7142..0000000000 --- a/awx/lib/site-packages/celery/app/abstract.py +++ /dev/null @@ -1,63 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.app.abstract - ~~~~~~~~~~~~~~~~~~~ - - Abstract class that takes default attribute values - from the configuration. 
- -""" -from __future__ import absolute_import - - -class from_config(object): - - def __init__(self, key=None): - self.key = key - - def get_key(self, attr): - return attr if self.key is None else self.key - - -class _configurated(type): - - def __new__(cls, name, bases, attrs): - attrs['__confopts__'] = dict((attr, spec.get_key(attr)) - for attr, spec in attrs.iteritems() - if isinstance(spec, from_config)) - inherit_from = attrs.get('inherit_confopts', ()) - for subcls in bases: - try: - attrs['__confopts__'].update(subcls.__confopts__) - except AttributeError: - pass - for subcls in inherit_from: - attrs['__confopts__'].update(subcls.__confopts__) - attrs = dict((k, v if not isinstance(v, from_config) else None) - for k, v in attrs.iteritems()) - return super(_configurated, cls).__new__(cls, name, bases, attrs) - - -class configurated(object): - __metaclass__ = _configurated - - def setup_defaults(self, kwargs, namespace='celery'): - confopts = self.__confopts__ - app, find = self.app, self.app.conf.find_value_for_key - - for attr, keyname in confopts.iteritems(): - try: - value = kwargs[attr] - except KeyError: - value = find(keyname, namespace) - else: - if value is None: - value = find(keyname, namespace) - setattr(self, attr, value) - - for attr_name, attr_value in kwargs.iteritems(): - if attr_name not in confopts and attr_value is not None: - setattr(self, attr_name, attr_value) - - def confopts_as_dict(self): - return dict((key, getattr(self, key)) for key in self.__confopts__) diff --git a/awx/lib/site-packages/celery/app/amqp.py b/awx/lib/site-packages/celery/app/amqp.py index 09787bcf63..850771636e 100644 --- a/awx/lib/site-packages/celery/app/amqp.py +++ b/awx/lib/site-packages/celery/app/amqp.py @@ -12,20 +12,25 @@ from datetime import timedelta from weakref import WeakValueDictionary from kombu import Connection, Consumer, Exchange, Producer, Queue -from kombu.common import entry_to_queue +from kombu.common import Broadcast from kombu.pools import ProducerPool from kombu.utils import cached_property, uuid from kombu.utils.encoding import safe_repr +from kombu.utils.functional import maybe_list from celery import signals +from celery.five import items, string_t from celery.utils.text import indent as textindent from . import app_or_default from . import routes as _routes +__all__ = ['AMQP', 'Queues', 'TaskProducer', 'TaskConsumer'] + #: Human readable queue declaration. QUEUE_FORMAT = """ -.> %(name)s exchange:%(exchange)s(%(exchange_type)s) binding:%(routing_key)s +.> {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \ +key={0.routing_key} """ @@ -46,15 +51,16 @@ class Queues(dict): _consume_from = None def __init__(self, queues=None, default_exchange=None, - create_missing=True, ha_policy=None): + create_missing=True, ha_policy=None, autoexchange=None): dict.__init__(self) self.aliases = WeakValueDictionary() self.default_exchange = default_exchange self.create_missing = create_missing self.ha_policy = ha_policy + self.autoexchange = Exchange if autoexchange is None else autoexchange if isinstance(queues, (tuple, list)): queues = dict((q.name, q) for q in queues) - for name, q in (queues or {}).iteritems(): + for name, q in items(queues or {}): self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q) def __getitem__(self, name): @@ -79,11 +85,16 @@ class Queues(dict): def add(self, queue, **kwargs): """Add new queue. - :param queue: Name of the queue. - :keyword exchange: Name of the exchange. - :keyword routing_key: Binding key. 
- :keyword exchange_type: Type of exchange. - :keyword \*\*options: Additional declaration options. + The first argument can either be a :class:`kombu.Queue` instance, + or the name of a queue. If the former the rest of the keyword + arguments are ignored, and options are simply taken from the queue + instance. + + :param queue: :class:`kombu.Queue` instance or name of the queue. + :keyword exchange: (if named) specifies exchange name. + :keyword routing_key: (if named) specifies binding key. + :keyword exchange_type: (if named) specifies type of exchange. + :keyword \*\*options: (if named) Additional declaration options. """ if not isinstance(queue, Queue): @@ -102,7 +113,7 @@ class Queues(dict): options['routing_key'] = name if self.ha_policy is not None: self._set_ha_policy(options.setdefault('queue_arguments', {})) - q = self[name] = entry_to_queue(name, **options) + q = self[name] = Queue.from_dict(name, **options) return q def _set_ha_policy(self, args): @@ -117,13 +128,8 @@ class Queues(dict): active = self.consume_from if not active: return '' - info = [ - QUEUE_FORMAT.strip() % { - 'name': (name + ':').ljust(12), - 'exchange': q.exchange.name, - 'exchange_type': q.exchange.type, - 'routing_key': q.routing_key} - for name, q in sorted(active.iteritems())] + info = [QUEUE_FORMAT.strip().format(q) + for _, q in sorted(items(active))] if indent_first: return textindent('\n'.join(info), indent) return info[0] + '\n' + textindent('\n'.join(info[1:]), indent) @@ -136,23 +142,37 @@ class Queues(dict): self._consume_from[q.name] = q return q - def select_subset(self, wanted): + def select(self, include): """Sets :attr:`consume_from` by selecting a subset of the currently defined queues. - :param wanted: List of wanted queue names. + :param include: Names of queues to consume from. + Can be iterable or string. """ - if wanted: - self._consume_from = dict((name, self[name]) for name in wanted) + if include: + self._consume_from = dict((name, self[name]) + for name in maybe_list(include)) + select_subset = select # XXX compat - def select_remove(self, queue): - if self._consume_from is None: - self.select_subset(k for k in self if k != queue) - else: - self._consume_from.pop(queue, None) + def deselect(self, exclude): + """Deselect queues so that they will not be consumed from. + + :param exclude: Names of queues to avoid consuming from. + Can be iterable or string. 
+ + """ + if exclude: + exclude = maybe_list(exclude) + if self._consume_from is None: + # using selection + return self.select(k for k in self if k not in exclude) + # using all queues + for queue in exclude: + self._consume_from.pop(queue, None) + select_remove = deselect # XXX compat def new_missing(self, name): - return Queue(name, Exchange(name), name) + return Queue(name, self.autoexchange(name), name) @property def consume_from(self): @@ -189,20 +209,30 @@ class TaskProducer(Producer): queue=None, now=None, retries=0, chord=None, callbacks=None, errbacks=None, routing_key=None, serializer=None, delivery_mode=None, compression=None, - declare=None, **kwargs): + reply_to=None, time_limit=None, soft_time_limit=None, + declare=None, headers=None, + send_before_publish=signals.before_task_publish.send, + before_receivers=signals.before_task_publish.receivers, + send_after_publish=signals.after_task_publish.send, + after_receivers=signals.after_task_publish.receivers, + send_task_sent=signals.task_sent.send, # XXX deprecated + sent_receivers=signals.task_sent.receivers, + **kwargs): """Send task message.""" + retry = self.retry if retry is None else retry qname = queue if queue is None and exchange is None: queue = self.default_queue if queue is not None: - if isinstance(queue, basestring): + if isinstance(queue, string_t): qname, queue = queue, self.queues[queue] else: qname = queue.name exchange = exchange or queue.exchange.name routing_key = routing_key or queue.routing_key - declare = declare or ([queue] if queue else []) + if declare is None and queue and not isinstance(queue, Broadcast): + declare = [queue] # merge default and custom policy retry = self.retry if retry is None else retry @@ -218,9 +248,13 @@ class TaskProducer(Producer): if countdown: # Convert countdown to ETA. 
now = now or self.app.now() eta = now + timedelta(seconds=countdown) + if self.utc: + eta = eta.replace(tzinfo=self.app.timezone) if isinstance(expires, (int, float)): now = now or self.app.now() expires = now + timedelta(seconds=expires) + if self.utc: + expires = expires.replace(tzinfo=self.app.timezone) eta = eta and eta.isoformat() expires = expires and expires.isoformat() @@ -235,21 +269,44 @@ class TaskProducer(Producer): 'utc': self.utc, 'callbacks': callbacks, 'errbacks': errbacks, + 'timelimit': (time_limit, soft_time_limit), 'taskset': group_id or taskset_id, 'chord': chord, } + if before_receivers: + send_before_publish( + sender=task_name, body=body, + exchange=exchange, + routing_key=routing_key, + declare=declare, + headers=headers, + properties=kwargs, + retry_policy=retry_policy, + ) + self.publish( body, exchange=exchange, routing_key=routing_key, serializer=serializer or self.serializer, compression=compression or self.compression, + headers=headers, retry=retry, retry_policy=_rp, + reply_to=reply_to, + correlation_id=task_id, delivery_mode=delivery_mode, declare=declare, **kwargs ) - signals.task_sent.send(sender=task_name, **body) + if after_receivers: + send_after_publish(sender=task_name, body=body, + exchange=exchange, routing_key=routing_key) + + if sent_receivers: # XXX deprecated + send_task_sent(sender=task_name, task_id=task_id, + task=task_name, args=task_args, + kwargs=task_kwargs, eta=eta, + taskset=group_id or taskset_id) if self.send_sent_event: evd = event_dispatcher or self.event_dispatcher exname = exchange or self.exchange @@ -306,7 +363,7 @@ class TaskConsumer(Consumer): accept = self.app.conf.CELERY_ACCEPT_CONTENT super(TaskConsumer, self).__init__( channel, - queues or self.app.amqp.queues.consume_from.values(), + queues or list(self.app.amqp.queues.consume_from.values()), accept=accept, **kw ) @@ -329,13 +386,20 @@ class AMQP(object): #: set by the :attr:`producer_pool`. _producer_pool = None + # Exchange class/function used when defining automatic queues. + # E.g. you can use ``autoexchange = lambda n: None`` to use the + # amqp default exchange, which is a shortcut to bypass routing + # and instead send directly to the queue named in the routing key. + autoexchange = None + def __init__(self, app): self.app = app def flush_routes(self): self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES) - def Queues(self, queues, create_missing=None, ha_policy=None): + def Queues(self, queues, create_missing=None, ha_policy=None, + autoexchange=None): """Create new :class:`Queues` instance, using queue defaults from the current configuration.""" conf = self.app.conf @@ -347,10 +411,15 @@ class AMQP(object): queues = (Queue(conf.CELERY_DEFAULT_QUEUE, exchange=self.default_exchange, routing_key=conf.CELERY_DEFAULT_ROUTING_KEY), ) - return Queues(queues, self.default_exchange, create_missing, ha_policy) + autoexchange = (self.autoexchange if autoexchange is None + else autoexchange) + return Queues( + queues, self.default_exchange, create_missing, + ha_policy, autoexchange, + ) def Router(self, queues=None, create_missing=None): - """Returns the current task router.""" + """Return the current task router.""" return _routes.Router(self.routes, queues or self.queues, self.app.either('CELERY_CREATE_MISSING_QUEUES', create_missing), app=self.app) @@ -365,7 +434,7 @@ class AMQP(object): @cached_property def TaskProducer(self): - """Returns publisher used to send tasks. + """Return publisher used to send tasks. You should use `app.send_task` instead. 
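The `select()`/`deselect()` pair added to `Queues` above replaces `select_subset()`/`select_remove()` (which are kept as compat aliases) and now accepts either a single name or any iterable via `maybe_list()`. A short usage sketch, assuming the celery 3.1 API as vendored here; the queue names and the `memory://` broker are invented for the example:

    from celery import Celery
    from kombu import Exchange, Queue

    app = Celery('demo', broker='memory://')
    app.conf.CELERY_QUEUES = (
        Queue('default', Exchange('default'), routing_key='default'),
        Queue('images', Exchange('media'), routing_key='media.images'),
        Queue('video', Exchange('media'), routing_key='media.video'),
    )

    queues = app.amqp.queues
    queues.select(['default', 'images'])  # consume only from these two
    queues.deselect('images')             # a plain string also works
    print(sorted(queues.consume_from))    # -> ['default']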
diff --git a/awx/lib/site-packages/celery/app/annotations.py b/awx/lib/site-packages/celery/app/annotations.py index 3e29cb6a12..62ee2e72e0 100644 --- a/awx/lib/site-packages/celery/app/annotations.py +++ b/awx/lib/site-packages/celery/app/annotations.py @@ -12,15 +12,14 @@ """ from __future__ import absolute_import -from celery.utils.functional import firstmethod, mpromise +from celery.five import string_t +from celery.utils.functional import firstmethod, mlazy from celery.utils.imports import instantiate _first_match = firstmethod('annotate') _first_match_any = firstmethod('annotate_any') - -def resolve_all(anno, task): - return (r for r in (_first_match(anno, task), _first_match_any(anno)) if r) +__all__ = ['MapAnnotation', 'prepare', 'resolve_all'] class MapAnnotation(dict): @@ -44,8 +43,8 @@ def prepare(annotations): def expand_annotation(annotation): if isinstance(annotation, dict): return MapAnnotation(annotation) - elif isinstance(annotation, basestring): - return mpromise(instantiate, annotation) + elif isinstance(annotation, string_t): + return mlazy(instantiate, annotation) return annotation if annotations is None: @@ -53,3 +52,7 @@ def prepare(annotations): elif not isinstance(annotations, (list, tuple)): annotations = (annotations, ) return [expand_annotation(anno) for anno in annotations] + + +def resolve_all(anno, task): + return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x) diff --git a/awx/lib/site-packages/celery/app/base.py b/awx/lib/site-packages/celery/app/base.py index d69dbc495b..790d197e63 100644 --- a/awx/lib/site-packages/celery/app/base.py +++ b/awx/lib/site-packages/celery/app/base.py @@ -7,36 +7,59 @@ """ from __future__ import absolute_import -from __future__ import with_statement import os import threading import warnings -from collections import deque +from collections import Callable, defaultdict, deque from contextlib import contextmanager from copy import deepcopy -from functools import wraps +from operator import attrgetter from billiard.util import register_after_fork from kombu.clocks import LamportClock -from kombu.utils import cached_property +from kombu.common import oid_from +from kombu.utils import cached_property, uuid from celery import platforms +from celery._state import ( + _task_stack, _tls, get_current_app, _register_app, get_current_worker_task, +) from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured +from celery.five import items, values from celery.loaders import get_loader_cls from celery.local import PromiseProxy, maybe_evaluate -from celery._state import _task_stack, _tls, get_current_app, _register_app from celery.utils.functional import first, maybe_list from celery.utils.imports import instantiate, symbol_by_name +from celery.utils.log import ensure_process_aware_logger +from celery.utils.objects import mro_lookup from .annotations import prepare as prepare_annotations from .builtins import shared_task, load_shared_tasks from .defaults import DEFAULTS, find_deprecated_settings from .registry import TaskRegistry -from .utils import AppPickler, Settings, bugreport, _unpickle_app +from .utils import ( + AppPickler, Settings, bugreport, _unpickle_app, _unpickle_app_v2, appstr, +) + +__all__ = ['Celery'] _EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING') +BUILTIN_FIXUPS = frozenset([ + 'celery.fixups.django:fixup', +]) + +ERR_ENVVAR_NOT_SET = """\ +The environment variable {0!r} is not set, +and as such the configuration could not be loaded. 
+Please set this variable and make it point to +a configuration module.""" + + +def app_has_custom(app, attr): + return mro_lookup(app.__class__, attr, stop=(Celery, object), + monkey_patched=[__name__]) def _unpickle_appattr(reverse_name, args): @@ -46,6 +69,7 @@ def _unpickle_appattr(reverse_name, args): class Celery(object): + #: This is deprecated, use :meth:`reduce_keys` instead Pickler = AppPickler SYSTEM = platforms.SYSTEM @@ -57,6 +81,7 @@ class Celery(object): loader_cls = 'celery.loaders.app:AppLoader' log_cls = 'celery.app.log:Logging' control_cls = 'celery.app.control:Control' + task_cls = 'celery.app.task:Task' registry_cls = TaskRegistry _pool = None @@ -64,8 +89,7 @@ class Celery(object): amqp=None, events=None, log=None, control=None, set_as_current=True, accept_magic_kwargs=False, tasks=None, broker=None, include=None, changes=None, - config_source=None, - **kwargs): + config_source=None, fixups=None, task_cls=None, **kwargs): self.clock = LamportClock() self.main = main self.amqp_cls = amqp or self.amqp_cls @@ -74,10 +98,13 @@ class Celery(object): self.loader_cls = loader or self.loader_cls self.log_cls = log or self.log_cls self.control_cls = control or self.control_cls + self.task_cls = task_cls or self.task_cls self.set_as_current = set_as_current self.registry_cls = symbol_by_name(self.registry_cls) self.accept_magic_kwargs = accept_magic_kwargs + self.user_options = defaultdict(set) self._config_source = config_source + self.steps = defaultdict(set) self.configured = False self._pending_defaults = deque() @@ -89,6 +116,11 @@ class Celery(object): if not isinstance(self._tasks, TaskRegistry): self._tasks = TaskRegistry(self._tasks or {}) + # If the class defins a custom __reduce_args__ we need to use + # the old way of pickling apps, which is pickling a list of + # args instead of the new way that pickles a dict of keywords. + self._using_v1_reduce = app_has_custom(self, '__reduce_args__') + # these options are moved to the config to # simplify pickling of the app object. self._preconf = changes or {} @@ -97,6 +129,11 @@ class Celery(object): if include: self._preconf['CELERY_IMPORTS'] = include + # Apply fixups. + self.fixups = set(fixups or ()) + for fixup in self.fixups | BUILTIN_FIXUPS: + symbol_by_name(fixup)(self) + if self.set_as_current: self.set_current() @@ -133,7 +170,7 @@ class Celery(object): def worker_main(self, argv=None): return instantiate( - 'celery.bin.celeryd:WorkerCommand', + 'celery.bin.worker:worker', app=self).execute_from_commandline(argv) def task(self, *args, **opts): @@ -145,8 +182,7 @@ class Celery(object): # the task instance from the current app. # Really need a better solution for this :( from . 
import shared_task as proxies_to_curapp - opts['_force_evaluate'] = True # XXX Py2.5 - return proxies_to_curapp(*args, **opts) + return proxies_to_curapp(*args, _force_evaluate=True, **opts) def inner_create_task_cls(shared=True, filter=None, **opts): _filt = filter # stupid 2to3 @@ -162,16 +198,20 @@ class Celery(object): task = filter(task) return task - # return a proxy object that is only evaluated when first used - promise = PromiseProxy(self._task_from_fun, (fun, ), opts) - self._pending.append(promise) + if self.finalized or opts.get('_force_evaluate'): + ret = self._task_from_fun(fun, **opts) + else: + # return a proxy object that evaluates on first use + ret = PromiseProxy(self._task_from_fun, (fun, ), opts, + __doc__=fun.__doc__) + self._pending.append(ret) if _filt: - return _filt(promise) - return promise + return _filt(ret) + return ret return _create_task_cls - if len(args) == 1 and callable(args[0]): + if len(args) == 1 and isinstance(args[0], Callable): return inner_create_task_cls(**opts)(*args) if args: raise TypeError( @@ -180,15 +220,16 @@ class Celery(object): def _task_from_fun(self, fun, **options): base = options.pop('base', None) or self.Task + bind = options.pop('bind', False) T = type(fun.__name__, (base, ), dict({ 'app': self, 'accept_magic_kwargs': False, - 'run': staticmethod(fun), + 'run': fun if bind else staticmethod(fun), + '_decorated': True, '__doc__': fun.__doc__, '__module__': fun.__module__}, **options))() task = self._tasks[T.name] # return global instance. - task.bind(self) return task def finalize(self): @@ -201,11 +242,11 @@ class Celery(object): while pending: maybe_evaluate(pending.popleft()) - for task in self._tasks.itervalues(): + for task in values(self._tasks): task.bind(self) def add_defaults(self, fun): - if not callable(fun): + if not isinstance(fun, Callable): d, fun = fun, lambda: d if self.configured: return self.conf.add_defaults(fun()) @@ -221,56 +262,83 @@ class Celery(object): if not module_name: if silent: return False - raise ImproperlyConfigured(self.error_envvar_not_set % module_name) + raise ImproperlyConfigured(ERR_ENVVAR_NOT_SET.format(module_name)) return self.config_from_object(module_name, silent=silent) def config_from_cmdline(self, argv, namespace='celery'): self.conf.update(self.loader.cmdline_config_parser(argv, namespace)) + def setup_security(self, allowed_serializers=None, key=None, cert=None, + store=None, digest='sha1', serializer='json'): + from celery.security import setup_security + return setup_security(allowed_serializers, key, cert, + store, digest, serializer, app=self) + + def autodiscover_tasks(self, packages, related_name='tasks'): + if self.conf.CELERY_FORCE_BILLIARD_LOGGING: + # we'll use billiard's processName instead of + # multiprocessing's one in all the loggers + # created after this call + ensure_process_aware_logger() + + self.loader.autodiscover_tasks(packages, related_name) + def send_task(self, name, args=None, kwargs=None, countdown=None, eta=None, task_id=None, producer=None, connection=None, - result_cls=None, expires=None, queues=None, publisher=None, - link=None, link_error=None, - **options): + router=None, result_cls=None, expires=None, + publisher=None, link=None, link_error=None, + add_to_parent=True, reply_to=None, **options): + task_id = task_id or uuid() producer = producer or publisher # XXX compat - if self.conf.CELERY_ALWAYS_EAGER: # pragma: no cover + router = router or self.amqp.router + conf = self.conf + if conf.CELERY_ALWAYS_EAGER: # pragma: no cover 
warnings.warn(AlwaysEagerIgnored( 'CELERY_ALWAYS_EAGER has no effect on send_task')) - - result_cls = result_cls or self.AsyncResult - router = self.amqp.Router(queues) - options.setdefault('compression', - self.conf.CELERY_MESSAGE_COMPRESSION) options = router.route(options, name, args, kwargs) - with self.producer_or_acquire(producer) as producer: - return result_cls(producer.publish_task( - name, args, kwargs, - task_id=task_id, - countdown=countdown, eta=eta, - callbacks=maybe_list(link), - errbacks=maybe_list(link_error), - expires=expires, **options - )) + if connection: + producer = self.amqp.TaskProducer(connection) + with self.producer_or_acquire(producer) as P: + self.backend.on_task_call(P, task_id) + task_id = P.publish_task( + name, args, kwargs, countdown=countdown, eta=eta, + task_id=task_id, expires=expires, + callbacks=maybe_list(link), errbacks=maybe_list(link_error), + reply_to=reply_to or self.oid, **options + ) + result = (result_cls or self.AsyncResult)(task_id) + if add_to_parent: + parent = get_current_worker_task() + if parent: + parent.add_trail(result) + return result - def connection(self, hostname=None, userid=None, - password=None, virtual_host=None, port=None, ssl=None, - insist=None, connect_timeout=None, transport=None, - transport_options=None, heartbeat=None, **kwargs): + def connection(self, hostname=None, userid=None, password=None, + virtual_host=None, port=None, ssl=None, + connect_timeout=None, transport=None, + transport_options=None, heartbeat=None, + login_method=None, failover_strategy=None, **kwargs): conf = self.conf return self.amqp.Connection( - hostname or conf.BROKER_HOST, + hostname or conf.BROKER_URL, userid or conf.BROKER_USER, password or conf.BROKER_PASSWORD, virtual_host or conf.BROKER_VHOST, port or conf.BROKER_PORT, transport=transport or conf.BROKER_TRANSPORT, - insist=self.either('BROKER_INSIST', insist), ssl=self.either('BROKER_USE_SSL', ssl), - connect_timeout=self.either( - 'BROKER_CONNECTION_TIMEOUT', connect_timeout), heartbeat=heartbeat, - transport_options=dict(conf.BROKER_TRANSPORT_OPTIONS, - **transport_options or {})) + login_method=login_method or conf.BROKER_LOGIN_METHOD, + failover_strategy=( + failover_strategy or conf.BROKER_FAILOVER_STRATEGY + ), + transport_options=dict( + conf.BROKER_TRANSPORT_OPTIONS, **transport_options or {} + ), + connect_timeout=self.either( + 'BROKER_CONNECTION_TIMEOUT', connect_timeout + ), + ) broker_connection = connection @contextmanager @@ -296,26 +364,6 @@ class Celery(object): yield producer default_producer = producer_or_acquire # XXX compat - def with_default_connection(self, fun): - """With any function accepting a `connection` - keyword argument, establishes a default connection if one is - not already passed to it. - - Any automatically established connection will be closed after - the function returns. - - **Deprecated** - - Use ``with app.connection_or_acquire(connection)`` instead. 
- - """ - @wraps(fun) - def _inner(*args, **kwargs): - connection = kwargs.pop('connection', None) - with self.connection_or_acquire(connection) as c: - return fun(*args, **dict(kwargs, connection=c)) - return _inner - def prepare_config(self, c): """Prepare configuration before it is merged with the defaults.""" return find_deprecated_settings(c) @@ -339,7 +387,7 @@ class Celery(object): ) def select_queues(self, queues=None): - return self.amqp.queues.select_subset(queues) + return self.amqp.queues.select(queues) def either(self, default_key, *values): """Fallback to the value of a configuration key if none of the @@ -356,7 +404,12 @@ class Celery(object): self.loader) return backend(app=self, url=url) + def on_configure(self): + """Callback calld when the app loads configuration""" + pass + def _get_config(self): + self.on_configure() self.configured = True s = Settings({}, [self.prepare_config(self.loader.conf), deepcopy(DEFAULTS)]) @@ -364,9 +417,9 @@ class Celery(object): # load lazy config dict initializers. pending = self._pending_defaults while pending: - s.add_defaults(pending.popleft()()) + s.add_defaults(maybe_evaluate(pending.popleft()())) if self._preconf: - for key, value in self._preconf.iteritems(): + for key, value in items(self._preconf): setattr(s, key, value) return s @@ -382,14 +435,20 @@ class Celery(object): amqp._producer_pool.force_close_all() amqp._producer_pool = None + def signature(self, *args, **kwargs): + kwargs['app'] = self + return self.canvas.signature(*args, **kwargs) + def create_task_cls(self): """Creates a base task class using default configuration taken from this app.""" - return self.subclass_with_self('celery.app.task:Task', name='Task', - attribute='_app', abstract=True) + return self.subclass_with_self( + self.task_cls, name='Task', attribute='_app', + keep_reduce=True, abstract=True, + ) def subclass_with_self(self, Class, name=None, attribute='app', - reverse=None, **kw): + reverse=None, keep_reduce=False, **kw): """Subclass an app-compatible class by setting its app attribute to be this app instance. @@ -410,18 +469,24 @@ class Celery(object): return _unpickle_appattr, (reverse, self.__reduce_args__()) attrs = dict({attribute: self}, __module__=Class.__module__, - __doc__=Class.__doc__, __reduce__=__reduce__, **kw) + __doc__=Class.__doc__, **kw) + if not keep_reduce: + attrs['__reduce__'] = __reduce__ return type(name or Class.__name__, (Class, ), attrs) def _rgetattr(self, path): - return reduce(getattr, [self] + path.split('.')) + return attrgetter(path)(self) def __repr__(self): - return '<%s %s:0x%x>' % (self.__class__.__name__, - self.main or '__main__', id(self), ) + return '<{0} {1}>'.format(type(self).__name__, appstr(self)) def __reduce__(self): + if self._using_v1_reduce: + return self.__reduce_v1__() + return (_unpickle_app_v2, (self.__class__, self.__reduce_keys__())) + + def __reduce_v1__(self): # Reduce only pickles the configuration changes, # so the default configuration doesn't have to be passed # between processes. 
@@ -430,11 +495,30 @@ class Celery(object): (self.__class__, self.Pickler) + self.__reduce_args__(), ) + def __reduce_keys__(self): + """Return keyword arguments used to reconstruct the object + when unpickling.""" + return { + 'main': self.main, + 'changes': self.conf.changes, + 'loader': self.loader_cls, + 'backend': self.backend_cls, + 'amqp': self.amqp_cls, + 'events': self.events_cls, + 'log': self.log_cls, + 'control': self.control_cls, + 'accept_magic_kwargs': self.accept_magic_kwargs, + 'fixups': self.fixups, + 'config_source': self._config_source, + 'task_cls': self.task_cls, + } + def __reduce_args__(self): - return (self.main, self.conf.changes, self.loader_cls, - self.backend_cls, self.amqp_cls, self.events_cls, - self.log_cls, self.control_cls, self.accept_magic_kwargs, - self._config_source) + """Deprecated method, please use :meth:`__reduce_keys__` instead.""" + return (self.main, self.conf.changes, + self.loader_cls, self.backend_cls, self.amqp_cls, + self.events_cls, self.log_cls, self.control_cls, + self.accept_magic_kwargs, self._config_source) @cached_property def Worker(self): @@ -448,10 +532,6 @@ class Celery(object): def Beat(self, **kwargs): return self.subclass_with_self('celery.apps.beat:Beat') - @cached_property - def TaskSet(self): - return self.subclass_with_self('celery.task.sets:TaskSet') - @cached_property def Task(self): return self.create_task_cls() @@ -464,12 +544,22 @@ class Celery(object): def AsyncResult(self): return self.subclass_with_self('celery.result:AsyncResult') + @cached_property + def ResultSet(self): + return self.subclass_with_self('celery.result:ResultSet') + @cached_property def GroupResult(self): return self.subclass_with_self('celery.result:GroupResult') + @cached_property + def TaskSet(self): # XXX compat + """Deprecated! Please use :class:`celery.group` instead.""" + return self.subclass_with_self('celery.task.sets:TaskSet') + @cached_property def TaskSetResult(self): # XXX compat + """Deprecated! Please use :attr:`GroupResult` instead.""" return self.subclass_with_self('celery.result:TaskSetResult') @property @@ -484,6 +574,10 @@ class Celery(object): def current_task(self): return _task_stack.top + @cached_property + def oid(self): + return oid_from(self) + @cached_property def amqp(self): return instantiate(self.amqp_cls, app=self) @@ -512,8 +606,23 @@ class Celery(object): def log(self): return instantiate(self.log_cls, app=self) + @cached_property + def canvas(self): + from celery import canvas + return canvas + @cached_property def tasks(self): self.finalize() return self._tasks + + @cached_property + def timezone(self): + from celery.utils.timeutils import timezone + conf = self.conf + tz = conf.CELERY_TIMEZONE + if not tz: + return (timezone.get_timezone('UTC') if conf.CELERY_ENABLE_UTC + else timezone.local) + return timezone.get_timezone(self.conf.CELERY_TIMEZONE) App = Celery # compat diff --git a/awx/lib/site-packages/celery/app/builtins.py b/awx/lib/site-packages/celery/app/builtins.py index 1938af21cc..f6ecba4acd 100644 --- a/awx/lib/site-packages/celery/app/builtins.py +++ b/awx/lib/site-packages/celery/app/builtins.py @@ -8,32 +8,35 @@ """ from __future__ import absolute_import -from __future__ import with_statement from collections import deque from celery._state import get_current_worker_task from celery.utils import uuid +__all__ = ['shared_task', 'load_shared_tasks'] + #: global list of functions defining tasks that should be #: added to all apps. 
-_shared_tasks = [] +_shared_tasks = set() def shared_task(constructor): - """Decorator that specifies that the decorated function is a function - that generates a built-in task. + """Decorator that specifies a function that generates a built-in task. The function will then be called for every new app instance created (lazily, so more exactly when the task registry for that app is needed). + + The function must take a single ``app`` argument. """ - _shared_tasks.append(constructor) + _shared_tasks.add(constructor) return constructor def load_shared_tasks(app): - """Loads the built-in tasks for an app instance.""" - for constructor in _shared_tasks: + """Create built-in tasks for an app instance.""" + constructors = set(_shared_tasks) + for constructor in constructors: constructor(app) @@ -42,17 +45,13 @@ def add_backend_cleanup_task(app): """The backend cleanup task can be used to clean up the default result backend. - This task is also added do the periodic task schedule so that it is - run every day at midnight, but :program:`celerybeat` must be running - for this to be effective. - - Note that not all backends do anything for this, what needs to be - done at cleanup is up to each backend, and some backends - may even clean up in realtime so that a periodic cleanup is not necessary. + If the configured backend requires periodic cleanup this task is also + automatically configured to run every day at midnight (requires + :program:`celery beat` to be running). """ - - @app.task(name='celery.backend_cleanup', _force_evaluate=True) + @app.task(name='celery.backend_cleanup', + shared=False, _force_evaluate=True) def backend_cleanup(): app.backend.cleanup() return backend_cleanup @@ -60,58 +59,62 @@ def add_backend_cleanup_task(app): @shared_task def add_unlock_chord_task(app): - """The unlock chord task is used by result backends that doesn't - have native chord support. + """This task is used by result backends without native chord support. - It creates a task chain polling the header for completion. + It joins chords by creating a task chain polling the header for completion. """ - from celery.canvas import subtask + from celery.canvas import signature from celery.exceptions import ChordError - from celery.result import from_serializable + from celery.result import result_from_tuple default_propagate = app.conf.CELERY_CHORD_PROPAGATES - @app.task(name='celery.chord_unlock', max_retries=None, + @app.task(name='celery.chord_unlock', max_retries=None, shared=False, default_retry_delay=1, ignore_result=True, _force_evaluate=True) def unlock_chord(group_id, callback, interval=None, propagate=None, max_retries=None, result=None, Result=app.AsyncResult, GroupResult=app.GroupResult, - from_serializable=from_serializable): + result_from_tuple=result_from_tuple): # if propagate is disabled exceptions raised by chord tasks # will be sent as part of the result list to the chord callback. # Since 3.1 propagate will be enabled by default, and instead # the chord callback changes state to FAILURE with the # exception set to ChordError. propagate = default_propagate if propagate is None else propagate + if interval is None: + interval = unlock_chord.default_retry_delay # check if the task group is ready, and if so apply the callback. 
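# A compact sketch of the polling protocol implemented in the lines that
# follow (names as in this module; the join_native/join distinction and
# the error handling are omitted for brevity):
#
#   deps = GroupResult(group_id,
#                      [result_from_tuple(r, app=app) for r in result])
#   if deps.ready():                  # every header task has finished
#       signature(callback, app=app).delay(deps.join(propagate=propagate))
#   else:                             # not ready: schedule another poll
#       return unlock_chord.retry(countdown=interval,
#                                 max_retries=max_retries)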
deps = GroupResult( group_id, - [from_serializable(r, app=app) for r in result], + [result_from_tuple(r, app=app) for r in result], ) j = deps.join_native if deps.supports_native_join else deps.join if deps.ready(): - callback = subtask(callback) + callback = signature(callback, app=app) try: ret = j(propagate=propagate) - except Exception, exc: + except Exception as exc: try: - culprit = deps._failed_join_report().next() - reason = 'Dependency %s raised %r' % (culprit.id, exc) + culprit = next(deps._failed_join_report()) + reason = 'Dependency {0.id} raised {1!r}'.format( + culprit, exc, + ) except StopIteration: reason = repr(exc) + app._tasks[callback.task].backend.fail_from_current_stack( callback.id, exc=ChordError(reason), ) else: try: callback.delay(ret) - except Exception, exc: + except Exception as exc: app._tasks[callback.task].backend.fail_from_current_stack( callback.id, - exc=ChordError('Callback error: %r' % (exc, )), + exc=ChordError('Callback error: {0!r}'.format(exc)), ) else: return unlock_chord.retry(countdown=interval, @@ -121,23 +124,23 @@ def add_unlock_chord_task(app): @shared_task def add_map_task(app): - from celery.canvas import subtask + from celery.canvas import signature - @app.task(name='celery.map', _force_evaluate=True) + @app.task(name='celery.map', shared=False, _force_evaluate=True) def xmap(task, it): - task = subtask(task).type - return [task(value) for value in it] + task = signature(task, app=app).type + return [task(item) for item in it] return xmap @shared_task def add_starmap_task(app): - from celery.canvas import subtask + from celery.canvas import signature - @app.task(name='celery.starmap', _force_evaluate=True) + @app.task(name='celery.starmap', shared=False, _force_evaluate=True) def xstarmap(task, it): - task = subtask(task).type - return [task(*args) for args in it] + task = signature(task, app=app).type + return [task(*item) for item in it] return xstarmap @@ -145,7 +148,7 @@ def add_starmap_task(app): def add_chunk_task(app): from celery.canvas import chunks as _chunks - @app.task(name='celery.chunks', _force_evaluate=True) + @app.task(name='celery.chunks', shared=False, _force_evaluate=True) def chunks(task, it, n): return _chunks.apply_chunks(task, it, n) return chunks @@ -154,19 +157,20 @@ def add_chunk_task(app): @shared_task def add_group_task(app): _app = app - from celery.canvas import maybe_subtask, subtask - from celery.result import from_serializable + from celery.canvas import maybe_signature, signature + from celery.result import result_from_tuple class Group(app.Task): app = _app name = 'celery.group' accept_magic_kwargs = False + _decorated = True def run(self, tasks, result, group_id, partial_args): app = self.app - result = from_serializable(result, app) + result = result_from_tuple(result, app) # any partial args are added to all tasks in the group - taskit = (subtask(task).clone(partial_args) + taskit = (signature(task, app=app).clone(partial_args) for i, task in enumerate(tasks)) if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER: return app.GroupResult( @@ -178,30 +182,25 @@ def add_group_task(app): add_to_parent=False) for stask in taskit] parent = get_current_worker_task() if parent: - parent.request.children.append(result) + parent.add_trail(result) return result def prepare(self, options, tasks, args, **kwargs): - AsyncResult = self.AsyncResult options['group_id'] = group_id = ( options.setdefault('task_id', uuid())) def prepare_member(task): - task = maybe_subtask(task) - opts = task.options - 
opts['group_id'] = group_id - try: - tid = opts['task_id'] - except KeyError: - tid = opts['task_id'] = uuid() - return task, AsyncResult(tid) + task = maybe_signature(task, app=self.app) + task.options['group_id'] = group_id + return task, task.freeze() try: - tasks, results = zip(*[prepare_member(task) for task in tasks]) + tasks, res = list(zip( + *[prepare_member(task) for task in tasks] + )) except ValueError: # tasks empty - tasks, results = [], [] - return (tasks, self.app.GroupResult(group_id, results), - group_id, args) + tasks, res = [], [] + return (tasks, self.app.GroupResult(group_id, res), group_id, args) def apply_async(self, partial_args=(), kwargs={}, **options): if self.app.conf.CELERY_ALWAYS_EAGER: @@ -210,7 +209,7 @@ def add_group_task(app): options, args=partial_args, **kwargs ) super(Group, self).apply_async(( - list(tasks), result.serializable(), gid, args), **options + list(tasks), result.as_tuple(), gid, args), **options ) return result @@ -223,50 +222,55 @@ def add_group_task(app): @shared_task def add_chain_task(app): - from celery.canvas import Signature, chord, group, maybe_subtask + from celery.canvas import Signature, chord, group, maybe_signature _app = app class Chain(app.Task): app = _app name = 'celery.chain' accept_magic_kwargs = False + _decorated = True def prepare_steps(self, args, tasks): + app = self.app steps = deque(tasks) next_step = prev_task = prev_res = None tasks, results = [], [] i = 0 while steps: # First task get partial args from chain. - task = maybe_subtask(steps.popleft()) + task = maybe_signature(steps.popleft(), app=app) task = task.clone() if i else task.clone(args) - res = task._freeze() + res = task.freeze() i += 1 - if isinstance(task, group): + if isinstance(task, group) and steps and \ + not isinstance(steps[0], group): # automatically upgrade group(..) | s to chord(group, s) try: next_step = steps.popleft() # for chords we freeze by pretending it's a normal # task instead of a group. - res = Signature._freeze(task) + res = Signature.freeze(next_step) task = chord(task, body=next_step, task_id=res.task_id) except IndexError: - pass + pass # no callback, so keep as group if prev_task: # link previous task to this task. prev_task.link(task) # set the results parent attribute. - res.parent = prev_res + if not res.parent: + res.parent = prev_res - results.append(res) - tasks.append(task) + if not isinstance(prev_task, chord): + results.append(res) + tasks.append(task) prev_task, prev_res = task, res return tasks, results def apply_async(self, args=(), kwargs={}, group_id=None, chord=None, - task_id=None, **options): + task_id=None, link=None, link_error=None, **options): if self.app.conf.CELERY_ALWAYS_EAGER: return self.apply(args, kwargs, **options) options.pop('publisher', None) @@ -279,13 +283,24 @@ def add_chain_task(app): if task_id: tasks[-1].set(task_id=task_id) result = tasks[-1].type.AsyncResult(task_id) + # make sure we can do a link() and link_error() on a chain object. 
+ if link: + tasks[-1].set(link=link) + # and if any task in the chain fails, call the errbacks + if link_error: + for task in tasks: + task.set(link_error=link_error) tasks[0].apply_async() return result - def apply(self, args=(), kwargs={}, subtask=maybe_subtask, **options): + def apply(self, args=(), kwargs={}, signature=maybe_signature, + **options): + app = self.app last, fargs = None, args # fargs passed to first task only for task in kwargs['tasks']: - res = subtask(task).clone(fargs).apply(last and (last.get(), )) + res = signature(task, app=app).clone(fargs).apply( + last and (last.get(), ), + ) res.parent, last, fargs = last, res, None return last return Chain @@ -294,10 +309,10 @@ def add_chain_task(app): @shared_task def add_chord_task(app): """Every chord is executed in a dedicated task, so that the chord - can be used as a subtask, and this generates the task + can be used as a signature, and this generates the task responsible for that.""" from celery import group - from celery.canvas import maybe_subtask + from celery.canvas import maybe_signature _app = app default_propagate = app.conf.CELERY_CHORD_PROPAGATES @@ -306,18 +321,22 @@ def add_chord_task(app): name = 'celery.chord' accept_magic_kwargs = False ignore_result = False + _decorated = True def run(self, header, body, partial_args=(), interval=None, countdown=1, max_retries=None, propagate=None, eager=False, **kwargs): + app = self.app propagate = default_propagate if propagate is None else propagate group_id = uuid() - AsyncResult = self.app.AsyncResult + AsyncResult = app.AsyncResult prepare_member = self._prepare_member # - convert back to group if serialized tasks = header.tasks if isinstance(header, group) else header - header = group([maybe_subtask(s).clone() for s in tasks]) + header = group([ + maybe_signature(s, app=app).clone() for s in tasks + ]) # - eager applies the group inline if eager: return header.apply(args=partial_args, task_id=group_id) @@ -333,8 +352,9 @@ def add_chord_task(app): propagate=propagate, result=results) # - call the header group, returning the GroupResult. - # XXX Python 2.5 doesn't allow kwargs after star-args. 
- return header(*partial_args, **{'task_id': group_id}) + final_res = header(*partial_args, task_id=group_id) + + return final_res def _prepare_member(self, task, body, group_id): opts = task.options @@ -346,23 +366,25 @@ def add_chord_task(app): opts.update(chord=body, group_id=group_id) return task_id - def apply_async(self, args=(), kwargs={}, task_id=None, **options): - if self.app.conf.CELERY_ALWAYS_EAGER: + def apply_async(self, args=(), kwargs={}, task_id=None, + group_id=None, chord=None, **options): + app = self.app + if app.conf.CELERY_ALWAYS_EAGER: return self.apply(args, kwargs, **options) - group_id = options.pop('group_id', None) - chord = options.pop('chord', None) header = kwargs.pop('header') body = kwargs.pop('body') - header, body = (list(maybe_subtask(header)), - maybe_subtask(body)) - if group_id: - body.set(group_id=group_id) - if chord: - body.set(chord=chord) - callback_id = body.options.setdefault('task_id', task_id or uuid()) + header, body = (list(maybe_signature(header, app=app)), + maybe_signature(body, app=app)) + # forward certain options to body + if chord is not None: + body.options['chord'] = chord + if group_id is not None: + body.options['group_id'] = group_id + [body.link(s) for s in options.pop('link', [])] + [body.link_error(s) for s in options.pop('link_error', [])] + body_result = body.freeze(task_id) parent = super(Chord, self).apply_async((header, body, args), kwargs, **options) - body_result = self.AsyncResult(callback_id) body_result.parent = parent return body_result @@ -370,6 +392,6 @@ def add_chord_task(app): body = kwargs['body'] res = super(Chord, self).apply(args, dict(kwargs, eager=True), **options) - return maybe_subtask(body).apply( + return maybe_signature(body, app=self.app).apply( args=(res.get(propagate=propagate).get(), )) return Chord diff --git a/awx/lib/site-packages/celery/app/control.py b/awx/lib/site-packages/celery/app/control.py index a3e8b23efa..fdd49a9d66 100644 --- a/awx/lib/site-packages/celery/app/control.py +++ b/awx/lib/site-packages/celery/app/control.py @@ -8,17 +8,32 @@ """ from __future__ import absolute_import -from __future__ import with_statement + +import warnings from kombu.pidbox import Mailbox from kombu.utils import cached_property -from . import app_or_default +from celery.exceptions import DuplicateNodenameWarning + +__all__ = ['Inspect', 'Control', 'flatten_reply'] + +W_DUPNODE = """\ +Received multiple replies from node name {0!r}. 
+Please make sure you give each node a unique nodename using the `-n` option.\ +""" def flatten_reply(reply): nodes = {} + seen = set() for item in reply: + dup = next((nodename in seen for nodename in item), None) + if dup: + warnings.warn(DuplicateNodenameWarning( + W_DUPNODE.format(dup), + )) + seen.update(item) nodes.update(item) return nodes @@ -58,6 +73,9 @@ class Inspect(object): def report(self): return self._request('report') + def clock(self): + return self._request('clock') + def active(self, safe=False): return self._request('dump_active', safe=safe) @@ -83,15 +101,30 @@ class Inspect(object): def active_queues(self): return self._request('active_queues') - def conf(self): - return self._request('dump_conf') + def query_task(self, ids): + return self._request('query_task', ids=ids) + + def conf(self, with_defaults=False): + return self._request('dump_conf', with_defaults=with_defaults) + + def hello(self, from_node, revoked=None): + return self._request('hello', from_node=from_node, revoked=revoked) + + def memsample(self): + return self._request('memsample') + + def memdump(self, samples=10): + return self._request('memdump', samples=samples) + + def objgraph(self, type='Request', n=200, max_depth=10): + return self._request('objgraph', num=n, max_depth=max_depth, type=type) class Control(object): Mailbox = Mailbox def __init__(self, app=None): - self.app = app_or_default(app) + self.app = app self.mailbox = self.Mailbox('celery', type='fanout', accept=self.app.conf.CELERY_ACCEPT_CONTENT) @@ -112,6 +145,11 @@ class Control(object): return self.app.amqp.TaskConsumer(conn).purge() discard_all = purge + def election(self, id, topic, action=None, connection=None): + self.broadcast('election', connection=connection, arguments={ + 'id': id, 'topic': topic, 'action': action, + }) + def revoke(self, task_id, destination=None, terminate=False, signal='SIGTERM', **kwargs): """Tell all (or specific) workers to revoke a task by id. @@ -136,7 +174,7 @@ class Control(object): def ping(self, destination=None, timeout=1, **kwargs): """Ping all (or specific) workers. - Returns answer from alive workers. + Will return the list of answers. See :meth:`broadcast` for supported keyword arguments. @@ -234,7 +272,7 @@ class Control(object): Supports the same arguments as :meth:`broadcast`. """ - return self.broadcast('pool_grow', {}, destination, **kwargs) + return self.broadcast('pool_grow', {'n': n}, destination, **kwargs) def pool_shrink(self, n=1, destination=None, **kwargs): """Tell all (or specific) workers to shrink the pool by ``n``. @@ -242,7 +280,7 @@ class Control(object): Supports the same arguments as :meth:`broadcast`. 
""" - return self.broadcast('pool_shrink', {}, destination, **kwargs) + return self.broadcast('pool_shrink', {'n': n}, destination, **kwargs) def broadcast(self, command, arguments=None, destination=None, connection=None, reply=False, timeout=1, limit=None, diff --git a/awx/lib/site-packages/celery/app/defaults.py b/awx/lib/site-packages/celery/app/defaults.py index 0cb1037256..1e004fdf2a 100644 --- a/awx/lib/site-packages/celery/app/defaults.py +++ b/awx/lib/site-packages/celery/app/defaults.py @@ -10,25 +10,28 @@ from __future__ import absolute_import import sys -from collections import deque +from collections import deque, namedtuple from datetime import timedelta +from celery.five import items from celery.utils import strtobool from celery.utils.functional import memoize +__all__ = ['Option', 'NAMESPACES', 'flatten', 'find'] + is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') -DEFAULT_POOL = 'processes' +DEFAULT_POOL = 'prefork' if is_jython: DEFAULT_POOL = 'threads' elif is_pypy: if sys.pypy_version_info[0:3] < (1, 5, 0): DEFAULT_POOL = 'solo' else: - DEFAULT_POOL = 'processes' - + DEFAULT_POOL = 'prefork' +DEFAULT_ACCEPT_CONTENT = ['json', 'pickle', 'msgpack', 'yaml'] DEFAULT_PROCESS_LOG_FMT = """ [%(asctime)s: %(levelname)s/%(processName)s] %(message)s """.strip() @@ -36,10 +39,13 @@ DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s' DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \ %(task_name)s[%(task_id)s]: %(message)s""" -_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0', 'alt': 'BROKER_URL'} +_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0', + 'alt': 'BROKER_URL setting'} _REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0', 'alt': 'URL form of CELERY_RESULT_BACKEND'} +searchresult = namedtuple('searchresult', ('namespace', 'key', 'type')) + class Option(object): alt = None @@ -51,15 +57,15 @@ class Option(object): def __init__(self, default=None, *args, **kwargs): self.default = default self.type = kwargs.get('type') or 'string' - for attr, value in kwargs.iteritems(): + for attr, value in items(kwargs): setattr(self, attr, value) def to_python(self, value): return self.typemap[self.type](value) def __repr__(self): - return '%s default->%r>' % (self.type, self.default) - + return '{0} default->{1!r}>'.format(self.type, + self.default) NAMESPACES = { 'BROKER': { @@ -67,11 +73,11 @@ NAMESPACES = { 'CONNECTION_TIMEOUT': Option(4, type='float'), 'CONNECTION_RETRY': Option(True, type='bool'), 'CONNECTION_MAX_RETRIES': Option(100, type='int'), + 'FAILOVER_STRATEGY': Option(None, type='string'), 'HEARTBEAT': Option(None, type='int'), 'HEARTBEAT_CHECKRATE': Option(3.0, type='int'), + 'LOGIN_METHOD': Option(None, type='string'), 'POOL_LIMIT': Option(10, type='int'), - 'INSIST': Option(False, type='bool', - deprecate_by='2.4', remove_by='4.0'), 'USE_SSL': Option(False, type='bool'), 'TRANSPORT': Option(type='string'), 'TRANSPORT_OPTIONS': Option({}, type='dict'), @@ -90,24 +96,18 @@ NAMESPACES = { 'WRITE_CONSISTENCY': Option(type='string'), }, 'CELERY': { - 'ACCEPT_CONTENT': Option(None, type='any'), + 'ACCEPT_CONTENT': Option(DEFAULT_ACCEPT_CONTENT, type='list'), 'ACKS_LATE': Option(False, type='bool'), 'ALWAYS_EAGER': Option(False, type='bool'), - 'AMQP_TASK_RESULT_EXPIRES': Option( - type='float', deprecate_by='2.5', remove_by='4.0', - alt='CELERY_TASK_RESULT_EXPIRES' - ), - 'AMQP_TASK_RESULT_CONNECTION_MAX': Option( - 1, type='int', remove_by='2.5', alt='BROKER_POOL_LIMIT', - ), 'ANNOTATIONS': 
Option(type='any'), + 'FORCE_BILLIARD_LOGGING': Option(True, type='bool'), 'BROADCAST_QUEUE': Option('celeryctl'), 'BROADCAST_EXCHANGE': Option('celeryctl'), 'BROADCAST_EXCHANGE_TYPE': Option('fanout'), 'CACHE_BACKEND': Option(), 'CACHE_BACKEND_OPTIONS': Option({}, type='dict'), - # chord propagate will be True from v3.1 - 'CHORD_PROPAGATES': Option(False, type='bool'), + 'CHORD_PROPAGATES': Option(True, type='bool'), + 'COUCHBASE_BACKEND_SETTINGS': Option(None, type='dict'), 'CREATE_MISSING_QUEUES': Option(True, type='bool'), 'DEFAULT_RATE_LIMIT': Option(type='string'), 'DISABLE_RATE_LIMITS': Option(False, type='bool'), @@ -118,7 +118,10 @@ NAMESPACES = { 'DEFAULT_DELIVERY_MODE': Option(2, type='string'), 'EAGER_PROPAGATES_EXCEPTIONS': Option(False, type='bool'), 'ENABLE_UTC': Option(True, type='bool'), + 'ENABLE_REMOTE_CONTROL': Option(True, type='bool'), 'EVENT_SERIALIZER': Option('json'), + 'EVENT_QUEUE_EXPIRES': Option(None, type='float'), + 'EVENT_QUEUE_TTL': Option(None, type='float'), 'IMPORTS': Option((), type='tuple'), 'INCLUDE': Option((), type='tuple'), 'IGNORE_RESULT': Option(False, type='bool'), @@ -132,20 +135,18 @@ NAMESPACES = { 'REDIS_MAX_CONNECTIONS': Option(type='int'), 'RESULT_BACKEND': Option(type='string'), 'RESULT_DB_SHORT_LIVED_SESSIONS': Option(False, type='bool'), + 'RESULT_DB_TABLENAMES': Option(type='dict'), 'RESULT_DBURI': Option(), 'RESULT_ENGINE_OPTIONS': Option(type='dict'), 'RESULT_EXCHANGE': Option('celeryresults'), 'RESULT_EXCHANGE_TYPE': Option('direct'), 'RESULT_SERIALIZER': Option('pickle'), - 'RESULT_PERSISTENT': Option(False, type='bool'), + 'RESULT_PERSISTENT': Option(None, type='bool'), 'ROUTES': Option(type='any'), 'SEND_EVENTS': Option(False, type='bool'), 'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'), 'SEND_TASK_SENT_EVENT': Option(False, type='bool'), 'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'), - 'TASK_ERROR_WHITELIST': Option( - (), type='tuple', deprecate_by='2.5', remove_by='4.0', - ), 'TASK_PUBLISH_RETRY': Option(True, type='bool'), 'TASK_PUBLISH_RETRY_POLICY': Option({ 'max_retries': 3, @@ -166,22 +167,21 @@ NAMESPACES = { 'WORKER_DIRECT': Option(False, type='bool'), }, 'CELERYD': { - 'AUTOSCALER': Option('celery.worker.autoscale.Autoscaler'), - 'AUTORELOADER': Option('celery.worker.autoreload.Autoreloader'), - 'BOOT_STEPS': Option((), type='tuple'), + 'AGENT': Option(None, type='string'), + 'AUTOSCALER': Option('celery.worker.autoscale:Autoscaler'), + 'AUTORELOADER': Option('celery.worker.autoreload:Autoreloader'), 'CONCURRENCY': Option(0, type='int'), 'TIMER': Option(type='string'), 'TIMER_PRECISION': Option(1.0, type='float'), 'FORCE_EXECV': Option(False, type='bool'), 'HIJACK_ROOT_LOGGER': Option(True, type='bool'), - 'CONSUMER': Option(type='string'), + 'CONSUMER': Option('celery.worker.consumer:Consumer', type='string'), 'LOG_FORMAT': Option(DEFAULT_PROCESS_LOG_FMT), 'LOG_COLOR': Option(type='bool'), 'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='4.0', alt='--loglevel argument'), 'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0', alt='--logfile argument'), - 'MEDIATOR': Option('celery.worker.mediator.Mediator'), 'MAX_TASKS_PER_CHILD': Option(type='int'), 'POOL': Option(DEFAULT_POOL), 'POOL_PUTLOCKS': Option(True, type='bool'), @@ -195,7 +195,7 @@ NAMESPACES = { }, 'CELERYBEAT': { 'SCHEDULE': Option({}, type='dict'), - 'SCHEDULER': Option('celery.beat.PersistentScheduler'), + 'SCHEDULER': Option('celery.beat:PersistentScheduler'), 'SCHEDULE_FILENAME': Option('celerybeat-schedule'), 
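Each setting in these namespaces is declared through the small ``Option`` descriptor whose ``type`` drives coercion via ``to_python``. A minimal sketch of coercing a raw string value (standalone usage for illustration, not how celery calls it internally)::

    from celery.app.defaults import Option

    opt = Option(0, type='float')
    print(opt.to_python('2.5'))   # -> 2.5, via Option.typemap['float']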
'MAX_LOOP_INTERVAL': Option(0, type='float'), 'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0', @@ -228,7 +228,7 @@ def flatten(d, ns=''): stack = deque([(ns, d)]) while stack: name, space = stack.popleft() - for key, value in space.iteritems(): + for key, value in items(space): if isinstance(value, dict): stack.append((name + key + '_', value)) else: @@ -240,10 +240,10 @@ def find_deprecated_settings(source): from celery.utils import warn_deprecated for name, opt in flatten(NAMESPACES): if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None): - warn_deprecated(description='The %r setting' % (name, ), + warn_deprecated(description='The {0!r} setting'.format(name), deprecation=opt.deprecate_by, removal=opt.remove_by, - alternative='Use %s instead' % (opt.alt, )) + alternative='Use the {0.alt} instead'.format(opt)) return source @@ -252,16 +252,18 @@ def find(name, namespace='celery'): # - Try specified namespace first. namespace = namespace.upper() try: - return namespace, name.upper(), NAMESPACES[namespace][name.upper()] + return searchresult( + namespace, name.upper(), NAMESPACES[namespace][name.upper()], + ) except KeyError: # - Try all the other namespaces. - for ns, keys in NAMESPACES.iteritems(): + for ns, keys in items(NAMESPACES): if ns.upper() == name.upper(): - return None, ns, keys + return searchresult(None, ns, keys) elif isinstance(keys, dict): try: - return ns, name.upper(), keys[name.upper()] + return searchresult(ns, name.upper(), keys[name.upper()]) except KeyError: pass # - See if name is a qualname last. - return None, name.upper(), DEFAULTS[name.upper()] + return searchresult(None, name.upper(), DEFAULTS[name.upper()]) diff --git a/awx/lib/site-packages/celery/app/log.py b/awx/lib/site-packages/celery/app/log.py index a776548836..53c467669d 100644 --- a/awx/lib/site-packages/celery/app/log.py +++ b/awx/lib/site-packages/celery/app/log.py @@ -16,12 +16,15 @@ import logging import os import sys +from logging.handlers import WatchedFileHandler + from kombu.log import NullHandler +from kombu.utils.encoding import set_default_encoding_file from celery import signals from celery._state import get_current_task +from celery.five import class_property, string_t from celery.utils import isatty -from celery.utils.compat import WatchedFileHandler from celery.utils.log import ( get_logger, mlevel, ColorFormatter, ensure_process_aware_logger, @@ -30,7 +33,7 @@ from celery.utils.log import ( ) from celery.utils.term import colored -is_py3k = sys.version_info[0] == 3 +__all__ = ['TaskFormatter', 'Logging'] MP_LOG = os.environ.get('MP_LOG', False) @@ -67,28 +70,33 @@ class Logging(object): loglevel, logfile, colorize=colorize, ) if not handled: - logger = get_logger('celery.redirected') if redirect_stdouts: - self.redirect_stdouts_to_logger(logger, - loglevel=redirect_level) + self.redirect_stdouts(redirect_level) os.environ.update( CELERY_LOG_LEVEL=str(loglevel) if loglevel else '', CELERY_LOG_FILE=str(logfile) if logfile else '', - CELERY_LOG_REDIRECT='1' if redirect_stdouts else '', - CELERY_LOG_REDIRECT_LEVEL=str(redirect_level), + ) + return handled + + def redirect_stdouts(self, loglevel=None, name='celery.redirected'): + self.redirect_stdouts_to_logger( + get_logger(name), loglevel=loglevel + ) + os.environ.update( + CELERY_LOG_REDIRECT='1', + CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''), ) def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None, colorize=None, **kwargs): - if Logging._setup: + if self.already_setup: return - 
Logging._setup = True + self.already_setup = True loglevel = mlevel(loglevel or self.loglevel) format = format or self.format colorize = self.supports_color(colorize, logfile) reset_multiprocessing_logger() - if not is_py3k: - ensure_process_aware_logger() + ensure_process_aware_logger() receivers = signals.setup_logging.send( sender=None, loglevel=loglevel, logfile=logfile, format=format, colorize=colorize, @@ -121,14 +129,19 @@ class Logging(object): # then setup the root task logger. self.setup_task_loggers(loglevel, logfile, colorize=colorize) + try: + stream = logging.getLogger().handlers[0].stream + except (AttributeError, IndexError): + pass + else: + set_default_encoding_file(stream) + # This is a hack for multiprocessing's fork+exec, so that # logging before Process.run works. - logfile_name = logfile if isinstance(logfile, basestring) else '' - os.environ.update( - _MP_FORK_LOGLEVEL_=str(loglevel), - _MP_FORK_LOGFILE_=logfile_name, - _MP_FORK_LOGFORMAT_=format, - ) + logfile_name = logfile if isinstance(logfile, string_t) else '' + os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel), + _MP_FORK_LOGFILE_=logfile_name, + _MP_FORK_LOGFORMAT_=format) return receivers def _configure_logger(self, logger, logfile, loglevel, @@ -145,7 +158,7 @@ class Logging(object): If `logfile` is not specified, then `sys.stderr` is used. - Returns logger object. + Will return the base task logger object. """ loglevel = mlevel(loglevel or self.loglevel) @@ -229,3 +242,11 @@ class Logging(object): def get_default_logger(self, name='celery', **kwargs): return get_logger(name) + + @class_property + def already_setup(cls): + return cls._setup + + @already_setup.setter # noqa + def already_setup(cls, was_setup): + cls._setup = was_setup diff --git a/awx/lib/site-packages/celery/app/registry.py b/awx/lib/site-packages/celery/app/registry.py index 2acbe4f2c3..7046554d92 100644 --- a/awx/lib/site-packages/celery/app/registry.py +++ b/awx/lib/site-packages/celery/app/registry.py @@ -10,7 +10,13 @@ from __future__ import absolute_import import inspect +from importlib import import_module + +from celery._state import get_current_app from celery.exceptions import NotRegistered +from celery.five import items + +__all__ = ['TaskRegistry'] class TaskRegistry(dict): @@ -51,10 +57,15 @@ class TaskRegistry(dict): return self.filter_types('periodic') def filter_types(self, type): - return dict((name, task) for name, task in self.iteritems() + return dict((name, task) for name, task in items(self) if getattr(task, 'type', 'regular') == type) def _unpickle_task(name): - from celery import current_app - return current_app.tasks[name] + return get_current_app().tasks[name] + + +def _unpickle_task_v2(name, module=None): + if module: + import_module(module) + return get_current_app().tasks[name] diff --git a/awx/lib/site-packages/celery/app/routes.py b/awx/lib/site-packages/celery/app/routes.py index 015b148792..d654f9d705 100644 --- a/awx/lib/site-packages/celery/app/routes.py +++ b/awx/lib/site-packages/celery/app/routes.py @@ -10,10 +10,13 @@ from __future__ import absolute_import from celery.exceptions import QueueNotFound +from celery.five import string_t from celery.utils import lpmerge -from celery.utils.functional import firstmethod, mpromise +from celery.utils.functional import firstmethod, mlazy from celery.utils.imports import instantiate +__all__ = ['MapRoute', 'Router', 'prepare'] + _first_route = firstmethod('route_for_task') @@ -24,9 +27,10 @@ class MapRoute(object): self.map = map def route_for_task(self, task, 
*args, **kwargs): - route = self.map.get(task) - if route: - return dict(route) + try: + return dict(self.map[task]) + except KeyError: + pass class Router(object): @@ -51,7 +55,7 @@ class Router(object): def expand_destination(self, route): # Route can be a queue name: convenient for direct exchanges. - if isinstance(route, basestring): + if isinstance(route, string_t): queue, route = route, {} else: # can use defaults from configured queue, but override specific @@ -62,13 +66,8 @@ class Router(object): try: Q = self.queues[queue] # noqa except KeyError: - if not self.create_missing: - raise QueueNotFound( - 'Queue %r is not defined in CELERY_QUEUES' % queue) - for key in 'exchange', 'routing_key': - if route.get(key) is None: - route[key] = queue - Q = self.app.amqp.queues.add(queue, **route) + raise QueueNotFound( + 'Queue {0!r} missing from CELERY_QUEUES'.format(queue)) # needs to be declared by publisher route['queue'] = Q return route @@ -83,8 +82,8 @@ def prepare(routes): def expand_route(route): if isinstance(route, dict): return MapRoute(route) - if isinstance(route, basestring): - return mpromise(instantiate, route) + if isinstance(route, string_t): + return mlazy(instantiate, route) return route if routes is None: diff --git a/awx/lib/site-packages/celery/app/task.py b/awx/lib/site-packages/celery/app/task.py index 94282ecbf2..0c1f3c96c8 100644 --- a/awx/lib/site-packages/celery/app/task.py +++ b/awx/lib/site-packages/celery/app/task.py @@ -7,15 +7,18 @@ """ from __future__ import absolute_import -from __future__ import with_statement + +import sys + +from billiard.einfo import ExceptionInfo from celery import current_app from celery import states -from celery.__compat__ import class_property -from celery._state import get_current_worker_task, _task_stack -from celery.canvas import subtask -from celery.datastructures import ExceptionInfo -from celery.exceptions import MaxRetriesExceededError, RetryTaskError +from celery._state import _task_stack +from celery.canvas import signature +from celery.exceptions import MaxRetriesExceededError, Reject, Retry +from celery.five import class_property, items, with_metaclass +from celery.local import Proxy from celery.result import EagerResult from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise from celery.utils.functional import mattrgetter, maybe_list @@ -23,15 +26,57 @@ from celery.utils.imports import instantiate from celery.utils.mail import ErrorMail from .annotations import resolve_all as resolve_all_annotations -from .registry import _unpickle_task +from .registry import _unpickle_task_v2 +from .utils import appstr + +__all__ = ['Context', 'Task'] #: extracts attributes related to publishing a message from an object. 
extract_exec_options = mattrgetter( - 'queue', 'routing_key', 'exchange', - 'immediate', 'mandatory', 'priority', 'expires', - 'serializer', 'delivery_mode', 'compression', + 'queue', 'routing_key', 'exchange', 'priority', 'expires', + 'serializer', 'delivery_mode', 'compression', 'time_limit', + 'soft_time_limit', 'immediate', 'mandatory', # imm+man is deprecated ) +# We take __repr__ very seriously around here ;) +R_BOUND_TASK = '' +R_UNBOUND_TASK = '' +R_SELF_TASK = '<@task {0.name} bound to other {0.__self__}>' +R_INSTANCE = '<@task: {0.name} of {app}{flags}>' + + +class _CompatShared(object): + + def __init__(self, name, cons): + self.name = name + self.cons = cons + + def __hash__(self): + return hash(self.name) + + def __repr__(self): + return '' % (self.name, ) + + def __call__(self, app): + return self.cons(app) + + +def _strflags(flags, default=''): + if flags: + return ' ({0})'.format(', '.join(flags)) + return default + + +def _reprtask(task, fmt=None, flags=None): + flags = list(flags) if flags is not None else [] + flags.append('v2 compatible') if task.__v2_compat__ else None + if not fmt: + fmt = R_BOUND_TASK if task._app else R_UNBOUND_TASK + return fmt.format( + task, flags=_strflags(flags), + app=appstr(task._app) if task._app else None, + ) + class Context(object): # Default context @@ -45,7 +90,10 @@ class Context(object): eta = None expires = None is_eager = False + headers = None delivery_info = None + reply_to = None + correlation_id = None taskset = None # compat alias to group group = None chord = None @@ -53,7 +101,7 @@ class Context(object): called_directly = True callbacks = None errbacks = None - timeouts = None + timelimit = None _children = None # see property _protected = 0 @@ -61,19 +109,16 @@ class Context(object): self.update(*args, **kwargs) def update(self, *args, **kwargs): - self.__dict__.update(*args, **kwargs) + return self.__dict__.update(*args, **kwargs) def clear(self): - self.__dict__.clear() + return self.__dict__.clear() def get(self, key, default=None): - try: - return getattr(self, key) - except AttributeError: - return default + return getattr(self, key, default) def __repr__(self): - return '' % (vars(self, )) + return ''.format(vars(self)) @property def children(self): @@ -86,33 +131,62 @@ class Context(object): class TaskType(type): """Meta class for tasks. - Automatically registers the task in the task registry, except - if the `abstract` attribute is set. + Automatically registers the task in the task registry (except + if the :attr:`Task.abstract`` attribute is set). - If no `name` attribute is provided, then no name is automatically - set to the name of the module it was defined in, and the class name. + If no :attr:`Task.name` attribute is provided, then the name is generated + from the module and class name. """ + _creation_count = {} # used by old non-abstract task classes def __new__(cls, name, bases, attrs): new = super(TaskType, cls).__new__ task_module = attrs.get('__module__') or '__main__' # - Abstract class: abstract attribute should not be inherited. - if attrs.pop('abstract', None) or not attrs.get('autoregister', True): + abstract = attrs.pop('abstract', None) + if abstract or not attrs.get('autoregister', True): return new(cls, name, bases, attrs) # The 'app' attribute is now a property, with the real app located # in the '_app' attribute. Previously this was a regular attribute, # so we should support classes defining it. 
- _app1, _app2 = attrs.pop('_app', None), attrs.pop('app', None) - app = attrs['_app'] = _app1 or _app2 or current_app + app = attrs.pop('_app', None) or attrs.pop('app', None) + if not isinstance(app, Proxy) and app is None: + for base in bases: + if base._app: + app = base._app + break + else: + app = current_app._get_current_object() + attrs['_app'] = app # - Automatically generate missing/empty name. task_name = attrs.get('name') if not task_name: attrs['name'] = task_name = gen_task_name(app, name, task_module) + if not attrs.get('_decorated'): + # non decorated tasks must also be shared in case + # an app is created multiple times due to modules + # imported under multiple names. + # Hairy stuff, here to be compatible with 2.x. + # People should not use non-abstract task classes anymore, + # use the task decorator. + from celery.app.builtins import shared_task + unique_name = '.'.join([task_module, name]) + if unique_name not in cls._creation_count: + # the creation count is used as a safety + # so that the same task is not added recursively + # to the set of constructors. + cls._creation_count[unique_name] = 1 + shared_task(_CompatShared( + unique_name, + lambda app: TaskType.__new__(cls, name, bases, + dict(attrs, _app=app)), + )) + # - Create and register class. # Because of the way import happens (recursively) # we may or may not be the first time the task tries to register @@ -126,13 +200,10 @@ class TaskType(type): return instance.__class__ def __repr__(cls): - if cls._app: - return '' % (cls.__name__, cls._app, ) - if cls.__v2_compat__: - return '' % (cls.__name__, ) - return '' % (cls.__name__, ) + return _reprtask(cls) +@with_metaclass(TaskType) class Task(object): """Task base class. @@ -141,7 +212,6 @@ class Task(object): is overridden). """ - __metaclass__ = TaskType __trace__ = None __v2_compat__ = False # set by old base in celery.task.base @@ -185,6 +255,11 @@ class Task(object): #: setting. ignore_result = None + #: If enabled the request will keep track of subtasks started by + #: this task, and this information will be sent with the result + #: (``result.children``). + trail = True + #: When enabled errors will be stored even if the task is otherwise #: configured to ignore results. store_errors_even_if_ignored = None @@ -243,6 +318,8 @@ class Task(object): #: called. This should probably be deprecated. _default_request = None + _exec_options = None + __bound__ = False from_config = ( @@ -266,14 +343,14 @@ class Task(object): was_bound, self.__bound__ = self.__bound__, True self._app = app conf = app.conf + self._exec_options = None # clear option cache for attr_name, config_name in self.from_config: if getattr(self, attr_name, None) is None: setattr(self, attr_name, conf[config_name]) if self.accept_magic_kwargs is None: self.accept_magic_kwargs = app.accept_magic_kwargs - if self.backend is None: - self.backend = app.backend + self.backend = app.backend # decorate with annotations from config. if not was_bound: @@ -295,17 +372,19 @@ class Task(object): @classmethod def _get_app(self): - if not self.__bound__ or self._app is None: + if self._app is None: + self._app = current_app + if not self.__bound__: # The app property's __set__ method is not called # if Task.app is set (on the class), so must bind on use. 
- self.bind(current_app) + self.bind(self._app) return self._app app = class_property(_get_app, bind) @classmethod def annotate(self): for d in resolve_all_annotations(self.app.annotations, self): - for key, value in d.iteritems(): + for key, value in items(d): if key.startswith('@'): self.add_around(key[1:], value) else: @@ -332,17 +411,22 @@ class Task(object): self.pop_request() _task_stack.pop() - # - tasks are pickled into the name of the task only, and the reciever - # - simply grabs it from the local registry. def __reduce__(self): - return (_unpickle_task, (self.name, ), None) + # - tasks are pickled into the name of the task only, and the reciever + # - simply grabs it from the local registry. + # - in later versions the module of the task is also included, + # - and the receiving side tries to import that module so that + # - it will work even if the task has not been registered. + mod = type(self).__module__ + mod = mod if mod and mod in sys.modules else None + return (_unpickle_task_v2, (self.name, mod), None) def run(self, *args, **kwargs): """The body of the task executed by workers.""" raise NotImplementedError('Tasks must define the run method.') - def start_strategy(self, app, consumer): - return instantiate(self.Strategy, self, app, consumer) + def start_strategy(self, app, consumer, **kwargs): + return instantiate(self.Strategy, self, app, consumer, **kwargs) def delay(self, *args, **kwargs): """Star argument version of :meth:`apply_async`. @@ -357,10 +441,8 @@ class Task(object): """ return self.apply_async(args, kwargs) - def apply_async(self, args=None, kwargs=None, - task_id=None, producer=None, connection=None, router=None, - link=None, link_error=None, publisher=None, - add_to_parent=True, **options): + def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, + link=None, link_error=None, **options): """Apply tasks asynchronously by sending a message. :keyword args: The positional arguments to pass on to the @@ -371,14 +453,12 @@ class Task(object): :keyword countdown: Number of seconds into the future that the task should execute. Defaults to immediate - execution (do not confuse with the - `immediate` flag, as they are unrelated). + execution. :keyword eta: A :class:`~datetime.datetime` object describing the absolute time and date of when the task should be executed. May not be specified if `countdown` - is also supplied. (Do not confuse this with the - `immediate` flag, as they are unrelated). + is also supplied. :keyword expires: Either a :class:`int`, describing the number of seconds, or a :class:`~datetime.datetime` object @@ -429,70 +509,56 @@ class Task(object): :func:`kombu.compression.register`. Defaults to the :setting:`CELERY_MESSAGE_COMPRESSION` setting. - :keyword link: A single, or a list of subtasks to apply if the + :keyword link: A single, or a list of tasks to apply if the task exits successfully. - :keyword link_error: A single, or a list of subtasks to apply + :keyword link_error: A single, or a list of tasks to apply if an error occurs while executing the task. :keyword producer: :class:~@amqp.TaskProducer` instance to use. :keyword add_to_parent: If set to True (default) and the task is applied while executing another task, then the result will be appended to the parent tasks ``request.children`` - attribute. + attribute. Trailing can also be disabled by default using the + :attr:`trail` attribute :keyword publisher: Deprecated alias to ``producer``. 
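Taken together, the options documented in this list support calls like the following (task and callback names are hypothetical; see the note just below for the remaining pass-through options)::

    result = process_upload.apply_async(
        args=(upload_id,),
        countdown=10,                     # start in ten seconds
        expires=300,                      # drop if not started in 5 minutes
        link=notify.s('done'),            # signature applied on success
        link_error=notify.s('failed'),    # signature applied on error
    )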
Also supports all keyword arguments supported by - :meth:`kombu.messaging.Producer.publish`. + :meth:`kombu.Producer.publish`. .. note:: If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will be replaced by a local :func:`apply` call instead. """ - producer = producer or publisher app = self._get_app() - router = router or self.app.amqp.router - conf = app.conf - - # add 'self' if this is a bound method. - if self.__self__ is not None: - args = (self.__self__, ) + tuple(args) - - if conf.CELERY_ALWAYS_EAGER: - return self.apply(args, kwargs, task_id=task_id, + if app.conf.CELERY_ALWAYS_EAGER: + return self.apply(args, kwargs, task_id=task_id or uuid(), link=link, link_error=link_error, **options) - options = dict(extract_exec_options(self), **options) - options = router.route(options, self.name, args, kwargs) - - if connection: - producer = app.amqp.TaskProducer(connection) - with app.producer_or_acquire(producer) as P: - task_id = P.publish_task(self.name, args, kwargs, - task_id=task_id, - callbacks=maybe_list(link), - errbacks=maybe_list(link_error), - **options) - result = self.AsyncResult(task_id) - if add_to_parent: - parent = get_current_worker_task() - if parent: - parent.request.children.append(result) - return result + # add 'self' if this is a "task_method". + if self.__self__ is not None: + args = args if isinstance(args, tuple) else tuple(args or ()) + args = (self.__self__, ) + args + return app.send_task( + self.name, args, kwargs, task_id=task_id, producer=producer, + link=link, link_error=link_error, result_cls=self.AsyncResult, + **dict(self._get_exec_options(), **options) + ) def subtask_from_request(self, request=None, args=None, kwargs=None, **extra_options): - request = self.request if request is None else request args = request.args if args is None else args kwargs = request.kwargs if kwargs is None else kwargs - delivery_info = request.delivery_info or {} - options = { + limit_hard, limit_soft = request.timelimit or (None, None) + options = dict({ 'task_id': request.id, 'link': request.callbacks, 'link_error': request.errbacks, - 'exchange': delivery_info.get('exchange'), - 'routing_key': delivery_info.get('routing_key') - } + 'group_id': request.group, + 'chord': request.chord, + 'soft_time_limit': limit_soft, + 'time_limit': limit_hard, + }, **request.delivery_info or {}) return self.subtask(args, kwargs, options, type=self, **extra_options) def retry(self, args=None, kwargs=None, exc=None, throw=True, @@ -503,7 +569,7 @@ class Task(object): :param kwargs: Keyword arguments to retry with. :keyword exc: Custom exception to report when the max restart limit has been exceeded (default: - :exc:`~celery.exceptions.MaxRetriesExceededError`). + :exc:`~@MaxRetriesExceededError`). If this argument is set and retry is called while an exception was raised (``sys.exc_info()`` is set) @@ -515,16 +581,19 @@ class Task(object): :keyword eta: Explicit time and date to run the retry at (must be a :class:`~datetime.datetime` instance). :keyword max_retries: If set, overrides the default retry limit. + :keyword time_limit: If set, overrides the default time limit. + :keyword soft_time_limit: If set, overrides the default soft + time limit. :keyword \*\*options: Any extra options to pass on to meth:`apply_async`. :keyword throw: If this is :const:`False`, do not raise the - :exc:`~celery.exceptions.RetryTaskError` exception, + :exc:`~@Retry` exception, that tells the worker to mark the task as being retried. 
Note that this means the task will be marked as failed if the task raises an exception, or successful if it returns. - :raises celery.exceptions.RetryTaskError: To tell the worker that + :raises celery.exceptions.Retry: To tell the worker that the task has been re-sent for retry. This always happens, unless the `throw` keyword argument has been explicitly set to :const:`False`, and is considered normal operation. @@ -533,17 +602,20 @@ class Task(object): .. code-block:: python - >>> @task() - >>> def tweet(auth, message): + >>> from imaginary_twitter_lib import Twitter + >>> from proj.celery import app + + >>> @app.task() + ... def tweet(auth, message): ... twitter = Twitter(oauth=auth) ... try: ... twitter.post_status_update(message) - ... except twitter.FailWhale, exc: + ... except twitter.FailWhale as exc: ... # Retry in 5 minutes. ... raise tweet.retry(countdown=60 * 5, exc=exc) Although the task will never return above as `retry` raises an - exception to notify the worker, we use `return` in front of the retry + exception to notify the worker, we use `raise` in front of the retry to convey that the rest of the block will not be executed. """ @@ -555,11 +627,12 @@ class Task(object): # so just raise the original exception. if request.called_directly: maybe_reraise() # raise orig stack if PyErr_Occurred - raise exc or RetryTaskError('Task can be retried', None) + raise exc or Retry('Task can be retried', None) if not eta and countdown is None: countdown = self.default_retry_delay + is_eager = request.is_eager S = self.subtask_from_request( request, args, kwargs, countdown=countdown, eta=eta, retries=retries, @@ -570,13 +643,18 @@ class Task(object): if exc: maybe_reraise() raise self.MaxRetriesExceededError( - """Can't retry %s[%s] args:%s kwargs:%s""" % ( + "Can't retry {0}[{1}] args:{2} kwargs:{3}".format( self.name, request.id, S.args, S.kwargs)) # If task was executed eagerly using apply(), # then the retry must also be executed eagerly. - S.apply().get() if request.is_eager else S.apply_async() - ret = RetryTaskError(exc=exc, when=eta or countdown) + try: + S.apply().get() if is_eager else S.apply_async() + except Exception as exc: + if is_eager: + raise + raise Reject(exc, requeue=True) + ret = Retry(exc=exc, when=eta or countdown) if throw: raise ret return ret @@ -595,7 +673,7 @@ class Task(object): """ # trace imports Task, so need to import inline. 
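``apply`` is what :setting:`CELERY_ALWAYS_EAGER` falls back to, so it is also the easiest way to exercise a task synchronously in tests; a hypothetical example::

    eager = add.apply((2, 2))       # runs inline, returns an EagerResult
    assert eager.get() == 4
    assert eager.successful()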
- from celery.task.trace import eager_trace_task + from celery.app.trace import eager_trace_task app = self._get_app() args = args or () @@ -629,12 +707,13 @@ class Task(object): 'delivery_info': {'is_eager': True}} supported_keys = fun_takes_kwargs(task.run, default_kwargs) extend_with = dict((key, val) - for key, val in default_kwargs.items() + for key, val in items(default_kwargs) if key in supported_keys) kwargs.update(extend_with) tb = None retval, info = eager_trace_task(task, task_id, args, kwargs, + app=self._get_app(), request=request, propagate=throw) if isinstance(retval, ExceptionInfo): retval, tb = retval.exception, retval.traceback @@ -651,10 +730,11 @@ class Task(object): task_name=self.name, **kwargs) def subtask(self, args=None, *starargs, **starkwargs): - """Returns :class:`~celery.subtask` object for + """Return :class:`~celery.signature` object for this task, wrapping arguments and execution options for a single task invocation.""" - return subtask(self, args, *starargs, **starkwargs) + starkwargs.setdefault('app', self.app) + return signature(self, args, *starargs, **starkwargs) def s(self, *args, **kwargs): """``.s(*a, **k) -> .subtask(a, k)``""" @@ -667,17 +747,17 @@ class Task(object): def chunks(self, it, n): """Creates a :class:`~celery.canvas.chunks` task for this task.""" from celery import chunks - return chunks(self.s(), it, n) + return chunks(self.s(), it, n, app=self.app) def map(self, it): """Creates a :class:`~celery.canvas.xmap` task from ``it``.""" from celery import xmap - return xmap(self.s(), it) + return xmap(self.s(), it, app=self.app) def starmap(self, it): """Creates a :class:`~celery.canvas.xstarmap` task from ``it``.""" from celery import xstarmap - return xstarmap(self.s(), it) + return xstarmap(self.s(), it, app=self.app) def update_state(self, task_id=None, state=None, meta=None): """Update task state. @@ -719,7 +799,7 @@ class Task(object): :param args: Original arguments for the retried task. :param kwargs: Original keyword arguments for the retried task. - :keyword einfo: :class:`~celery.datastructures.ExceptionInfo` + :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` instance, containing the traceback. The return value of this handler is ignored. @@ -738,7 +818,7 @@ class Task(object): :param kwargs: Original keyword arguments for the task that failed. - :keyword einfo: :class:`~celery.datastructures.ExceptionInfo` + :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` instance, containing the traceback. The return value of this handler is ignored. @@ -756,7 +836,7 @@ class Task(object): :param kwargs: Original keyword arguments for the task that failed. - :keyword einfo: :class:`~celery.datastructures.ExceptionInfo` + :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` instance, containing the traceback (if any). The return value of this handler is ignored. 
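The handler hunks above change only the documented ``einfo`` type (it is now a :class:`billiard.einfo.ExceptionInfo`); a sketch of how such a hook is typically overridden, with the logger and base-class names being illustrative::

    from celery.utils.log import get_task_logger

    logger = get_task_logger(__name__)

    class LoggedTask(app.Task):          # ``app`` is your Celery instance
        abstract = True

        def on_failure(self, exc, task_id, args, kwargs, einfo):
            # einfo carries the formatted traceback of the failure
            logger.error('%s[%s] failed: %r\n%s',
                         self.name, task_id, exc, einfo.traceback)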
@@ -769,6 +849,11 @@ class Task(object): not getattr(self, 'disable_error_emails', None): self.ErrorMail(self, **kwargs).send(context, exc) + def add_trail(self, result): + if self.trail: + self.request.children.append(result) + return result + def push_request(self, *args, **kwargs): self.request_stack.push(Context(*args, **kwargs)) @@ -777,9 +862,7 @@ class Task(object): def __repr__(self): """`repr(task)`""" - if self.__self__: - return '' % (self.name, self.__self__) - return '<@task: %s>' % (self.name, ) + return _reprtask(self, R_SELF_TASK if self.__self__ else R_INSTANCE) def _get_request(self): """Get current request object.""" @@ -793,6 +876,11 @@ class Task(object): return req request = property(_get_request) + def _get_exec_options(self): + if self._exec_options is None: + self._exec_options = extract_exec_options(self) + return self._exec_options + @property def __name__(self): return self.__class__.__name__ diff --git a/awx/lib/site-packages/celery/app/trace.py b/awx/lib/site-packages/celery/app/trace.py new file mode 100644 index 0000000000..cc28cb5bee --- /dev/null +++ b/awx/lib/site-packages/celery/app/trace.py @@ -0,0 +1,399 @@ +# -*- coding: utf-8 -*- +""" + celery.app.trace + ~~~~~~~~~~~~~~~~ + + This module defines how the task execution is traced: + errors are recorded, handlers are applied and so on. + +""" +from __future__ import absolute_import + +# ## --- +# This is the heart of the worker, the inner loop so to speak. +# It used to be split up into nice little classes and methods, +# but in the end it only resulted in bad performance and horrible tracebacks, +# so instead we now use one closure per task class. + +import os +import socket +import sys + +from warnings import warn + +from billiard.einfo import ExceptionInfo +from kombu.utils import kwdict + +from celery import current_app +from celery import states, signals +from celery._state import _task_stack +from celery.app import set_default_app +from celery.app.task import Task as BaseTask, Context +from celery.exceptions import Ignore, Reject, Retry +from celery.utils.log import get_logger +from celery.utils.objects import mro_lookup +from celery.utils.serialization import ( + get_pickleable_exception, + get_pickleable_etype, +) + +__all__ = ['TraceInfo', 'build_tracer', 'trace_task', 'eager_trace_task', + 'setup_worker_optimizations', 'reset_worker_optimizations'] + +_logger = get_logger(__name__) + +send_prerun = signals.task_prerun.send +send_postrun = signals.task_postrun.send +send_success = signals.task_success.send +STARTED = states.STARTED +SUCCESS = states.SUCCESS +IGNORED = states.IGNORED +REJECTED = states.REJECTED +RETRY = states.RETRY +FAILURE = states.FAILURE +EXCEPTION_STATES = states.EXCEPTION_STATES +IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED]) + +#: set by :func:`setup_worker_optimizations` +_tasks = None +_patched = {} + + +def task_has_custom(task, attr): + """Return true if the task or one of its bases + defines ``attr`` (excluding the one in BaseTask).""" + return mro_lookup(task.__class__, attr, stop=(BaseTask, object), + monkey_patched=['celery.app.task']) + + +class TraceInfo(object): + __slots__ = ('state', 'retval') + + def __init__(self, state, retval=None): + self.state = state + self.retval = retval + + def handle_error_state(self, task, eager=False): + store_errors = not eager + if task.ignore_result: + store_errors = task.store_errors_even_if_ignored + + return { + RETRY: self.handle_retry, + FAILURE: self.handle_failure, + }[self.state](task, store_errors=store_errors) 
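Note the ``store_errors`` computation above: errors are recorded unless the task ran eagerly, and for a task with ``ignore_result=True`` only when ``store_errors_even_if_ignored`` is also set. In configuration terms (hypothetical task)::

    @app.task(ignore_result=True, store_errors_even_if_ignored=True)
    def audit(event):
        ...   # failures still reach the result backend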
+ + def handle_retry(self, task, store_errors=True): + """Handle retry exception.""" + # the exception raised is the Retry semi-predicate, + # and it's exc' attribute is the original exception raised (if any). + req = task.request + type_, _, tb = sys.exc_info() + try: + reason = self.retval + einfo = ExceptionInfo((type_, reason, tb)) + if store_errors: + task.backend.mark_as_retry( + req.id, reason.exc, einfo.traceback, request=req, + ) + task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo) + signals.task_retry.send(sender=task, request=req, + reason=reason, einfo=einfo) + return einfo + finally: + del(tb) + + def handle_failure(self, task, store_errors=True): + """Handle exception.""" + req = task.request + type_, _, tb = sys.exc_info() + try: + exc = self.retval + einfo = ExceptionInfo() + einfo.exception = get_pickleable_exception(einfo.exception) + einfo.type = get_pickleable_etype(einfo.type) + if store_errors: + task.backend.mark_as_failure( + req.id, exc, einfo.traceback, request=req, + ) + task.on_failure(exc, req.id, req.args, req.kwargs, einfo) + signals.task_failure.send(sender=task, task_id=req.id, + exception=exc, args=req.args, + kwargs=req.kwargs, + traceback=tb, + einfo=einfo) + return einfo + finally: + del(tb) + + +def build_tracer(name, task, loader=None, hostname=None, store_errors=True, + Info=TraceInfo, eager=False, propagate=False, app=None, + IGNORE_STATES=IGNORE_STATES): + """Return a function that traces task execution; catches all + exceptions and updates result backend with the state and result + + If the call was successful, it saves the result to the task result + backend, and sets the task status to `"SUCCESS"`. + + If the call raises :exc:`~@Retry`, it extracts + the original exception, uses that as the result and sets the task state + to `"RETRY"`. + + If the call results in an exception, it saves the exception as the task + result, and sets the task state to `"FAILURE"`. + + Return a function that takes the following arguments: + + :param uuid: The id of the task. + :param args: List of positional args to pass on to the function. + :param kwargs: Keyword arguments mapping to pass on to the function. + :keyword request: Request dict. + + """ + # If the task doesn't define a custom __call__ method + # we optimize it away by simply calling the run method directly, + # saving the extra method call and a line less in the stack trace. 
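In other words, ``task_has_custom`` decides which callable the tracer closes over: an ordinary decorated task is invoked through ``run`` directly, while a class that overrides ``__call__`` keeps its wrapper frame. An illustrative (hypothetical) subclass that deliberately keeps the frame::

    class Audited(app.Task):
        abstract = True

        def __call__(self, *args, **kwargs):
            print('entering {0}'.format(self.name))
            return super(Audited, self).__call__(*args, **kwargs)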
+ fun = task if task_has_custom(task, '__call__') else task.run + + loader = loader or app.loader + backend = task.backend + ignore_result = task.ignore_result + track_started = task.track_started + track_started = not eager and (task.track_started and not ignore_result) + publish_result = not eager and not ignore_result + hostname = hostname or socket.gethostname() + + loader_task_init = loader.on_task_init + loader_cleanup = loader.on_process_cleanup + + task_on_success = None + task_after_return = None + if task_has_custom(task, 'on_success'): + task_on_success = task.on_success + if task_has_custom(task, 'after_return'): + task_after_return = task.after_return + + store_result = backend.store_result + backend_cleanup = backend.process_cleanup + + pid = os.getpid() + + request_stack = task.request_stack + push_request = request_stack.push + pop_request = request_stack.pop + push_task = _task_stack.push + pop_task = _task_stack.pop + on_chord_part_return = backend.on_chord_part_return + + prerun_receivers = signals.task_prerun.receivers + postrun_receivers = signals.task_postrun.receivers + success_receivers = signals.task_success.receivers + + from celery import canvas + signature = canvas.maybe_signature # maybe_ does not clone if already + + def trace_task(uuid, args, kwargs, request=None): + R = I = None + kwargs = kwdict(kwargs) + try: + push_task(task) + task_request = Context(request or {}, args=args, + called_directly=False, kwargs=kwargs) + push_request(task_request) + try: + # -*- PRE -*- + if prerun_receivers: + send_prerun(sender=task, task_id=uuid, task=task, + args=args, kwargs=kwargs) + loader_task_init(uuid, task) + if track_started: + store_result( + uuid, {'pid': pid, 'hostname': hostname}, STARTED, + request=task_request, + ) + + # -*- TRACE -*- + try: + R = retval = fun(*args, **kwargs) + state = SUCCESS + except Reject as exc: + I, R = Info(REJECTED, exc), ExceptionInfo(internal=True) + state, retval = I.state, I.retval + except Ignore as exc: + I, R = Info(IGNORED, exc), ExceptionInfo(internal=True) + state, retval = I.state, I.retval + except Retry as exc: + I = Info(RETRY, exc) + state, retval = I.state, I.retval + R = I.handle_error_state(task, eager=eager) + except Exception as exc: + if propagate: + raise + I = Info(FAILURE, exc) + state, retval = I.state, I.retval + R = I.handle_error_state(task, eager=eager) + [signature(errback, app=app).apply_async((uuid, )) + for errback in task_request.errbacks or []] + except BaseException as exc: + raise + else: + # callback tasks must be applied before the result is + # stored, so that result.children is populated. 
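The ordering the comment describes is visible from the caller's side: by the time the parent result reports SUCCESS, its ``children`` already include the callback's result (sketch; task names hypothetical)::

    res = add.apply_async((2, 2), link=mul.s(10))
    res.get(timeout=10)
    print(res.children)    # [<AsyncResult: ...>] for the mul callback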
+ [signature(callback, app=app).apply_async((retval, )) + for callback in task_request.callbacks or []] + if publish_result: + store_result( + uuid, retval, SUCCESS, request=task_request, + ) + if task_on_success: + task_on_success(retval, uuid, args, kwargs) + if success_receivers: + send_success(sender=task, result=retval) + + # -* POST *- + if state not in IGNORE_STATES: + if task_request.chord: + on_chord_part_return(task) + if task_after_return: + task_after_return( + state, retval, uuid, args, kwargs, None, + ) + if postrun_receivers: + send_postrun(sender=task, task_id=uuid, task=task, + args=args, kwargs=kwargs, + retval=retval, state=state) + finally: + pop_task() + pop_request() + if not eager: + try: + backend_cleanup() + loader_cleanup() + except (KeyboardInterrupt, SystemExit, MemoryError): + raise + except Exception as exc: + _logger.error('Process cleanup failed: %r', exc, + exc_info=True) + except MemoryError: + raise + except Exception as exc: + if eager: + raise + R = report_internal_error(task, exc) + return R, I + + return trace_task + + +def trace_task(task, uuid, args, kwargs, request={}, **opts): + try: + if task.__trace__ is None: + task.__trace__ = build_tracer(task.name, task, **opts) + return task.__trace__(uuid, args, kwargs, request)[0] + except Exception as exc: + return report_internal_error(task, exc) + + +def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts): + return trace_task((app or current_app).tasks[name], + uuid, args, kwargs, request, app=app, **opts) +trace_task_ret = _trace_task_ret + + +def _fast_trace_task(task, uuid, args, kwargs, request={}): + # setup_worker_optimizations will point trace_task_ret to here, + # so this is the function used in the worker. + return _tasks[task].__trace__(uuid, args, kwargs, request)[0] + + +def eager_trace_task(task, uuid, args, kwargs, request=None, **opts): + opts.setdefault('eager', True) + return build_tracer(task.name, task, **opts)( + uuid, args, kwargs, request) + + +def report_internal_error(task, exc): + _type, _value, _tb = sys.exc_info() + try: + _value = task.backend.prepare_exception(exc) + exc_info = ExceptionInfo((_type, _value, _tb), internal=True) + warn(RuntimeWarning( + 'Exception raised outside body: {0!r}:\n{1}'.format( + exc, exc_info.traceback))) + return exc_info + finally: + del(_tb) + + +def setup_worker_optimizations(app): + global _tasks + global trace_task_ret + + # make sure custom Task.__call__ methods that calls super + # will not mess up the request/task stack. + _install_stack_protection() + + # all new threads start without a current app, so if an app is not + # passed on to the thread it will fall back to the "default app", + # which then could be the wrong app. So for the worker + # we set this to always return our app. This is a hack, + # and means that only a single app can be used for workers + # running in the same process. + app.set_current() + set_default_app(app) + + # evaluate all task classes by finalizing the app. 
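As the comments in ``setup_worker_optimizations`` explain, this is a process-global switch meant for worker startup; the intended call pattern is roughly (simplified sketch; the worker loop name is hypothetical, ``app`` is your Celery application)::

    from celery.app.trace import (
        setup_worker_optimizations, reset_worker_optimizations,
    )

    setup_worker_optimizations(app)    # app becomes the process default
    try:
        run_worker(app)                # hypothetical worker loop
    finally:
        reset_worker_optimizations()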
+ app.finalize() + + # set fast shortcut to task registry + _tasks = app._tasks + + trace_task_ret = _fast_trace_task + from celery.worker import job as job_module + job_module.trace_task_ret = _fast_trace_task + job_module.__optimize__() + + +def reset_worker_optimizations(): + global trace_task_ret + trace_task_ret = _trace_task_ret + try: + delattr(BaseTask, '_stackprotected') + except AttributeError: + pass + try: + BaseTask.__call__ = _patched.pop('BaseTask.__call__') + except KeyError: + pass + from celery.worker import job as job_module + job_module.trace_task_ret = _trace_task_ret + + +def _install_stack_protection(): + # Patches BaseTask.__call__ in the worker to handle the edge case + # where people override it and also call super. + # + # - The worker optimizes away BaseTask.__call__ and instead + # calls task.run directly. + # - so with the addition of current_task and the request stack + # BaseTask.__call__ now pushes to those stacks so that + # they work when tasks are called directly. + # + # The worker only optimizes away __call__ in the case + # where it has not been overridden, so the request/task stack + # will blow if a custom task class defines __call__ and also + # calls super(). + if not getattr(BaseTask, '_stackprotected', False): + _patched['BaseTask.__call__'] = orig = BaseTask.__call__ + + def __protected_call__(self, *args, **kwargs): + stack = self.request_stack + req = stack.top + if req and not req._protected and \ + len(stack) == 1 and not req.called_directly: + req._protected = 1 + return self.run(*args, **kwargs) + return orig(self, *args, **kwargs) + BaseTask.__call__ = __protected_call__ + BaseTask._stackprotected = True diff --git a/awx/lib/site-packages/celery/app/utils.py b/awx/lib/site-packages/celery/app/utils.py index 2e857ac57e..5238cad0b6 100644 --- a/awx/lib/site-packages/celery/app/utils.py +++ b/awx/lib/site-packages/celery/app/utils.py @@ -12,22 +12,27 @@ import os import platform as _platform import re -from celery import platforms +from collections import Mapping + from celery.datastructures import ConfigurationView +from celery.five import items, string_t +from celery.platforms import pyimplementation from celery.utils.text import pretty from celery.utils.imports import qualname from .defaults import find +__all__ = ['appstr', 'Settings', 'filter_hidden_settings', 'bugreport'] + #: Format used to generate bugreport information. 
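The template below is what ``bugreport()`` fills in; from application code it is normally reached through the app instance (usage sketch, project module hypothetical)::

    from proj.celery import app

    print(app.bugreport())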
BUGREPORT_INFO = """ -software -> celery:%(celery_v)s kombu:%(kombu_v)s py:%(py_v)s - billiard:%(billiard_v)s %(driver_v)s -platform -> system:%(system)s arch:%(arch)s imp:%(py_i)s -loader -> %(loader)s -settings -> transport:%(transport)s results:%(results)s +software -> celery:{celery_v} kombu:{kombu_v} py:{py_v} + billiard:{billiard_v} {driver_v} +platform -> system:{system} arch:{arch} imp:{py_i} +loader -> {loader} +settings -> transport:{transport} results:{results} -%(human_settings)s +{human_settings} """ HIDDEN_SETTINGS = re.compile( @@ -36,6 +41,11 @@ HIDDEN_SETTINGS = re.compile( ) +def appstr(app): + """String used in __repr__ etc, to id app instances.""" + return '{0}:0x{1:x}'.format(app.main or '__main__', id(app)) + + class Settings(ConfigurationView): """Celery settings object.""" @@ -54,7 +64,7 @@ class Settings(ConfigurationView): return self.BROKER_TRANSPORT @property - def BROKER_HOST(self): + def BROKER_URL(self): return (os.environ.get('CELERY_BROKER_URL') or self.first('BROKER_URL', 'BROKER_HOST')) @@ -64,16 +74,20 @@ class Settings(ConfigurationView): return self.first('CELERY_TIMEZONE', 'TIME_ZONE') def without_defaults(self): - """Returns the current configuration, but without defaults.""" + """Return the current configuration, but without defaults.""" # the last stash is the default settings, so just skip that return Settings({}, self._order[:-1]) + def value_set_for(self, key): + return key in self.without_defaults() + def find_option(self, name, namespace='celery'): """Search for option by name. - Will return ``(namespace, option_name, Option)`` tuple, e.g.:: + Will return ``(namespace, key, type)`` tuple, e.g.:: - >>> celery.conf.find_option('disable_rate_limits') + >>> from proj.celery import app + >>> app.conf.find_option('disable_rate_limits') ('CELERY', 'DISABLE_RATE_LIMITS', bool default->False>)) @@ -88,28 +102,35 @@ class Settings(ConfigurationView): return self.get_by_parts(*self.find_option(name, namespace)[:-1]) def get_by_parts(self, *parts): - """Returns the current value for setting specified as a path. + """Return the current value for setting specified as a path. 
Example:: - >>> celery.conf.get_by_parts('CELERY', 'DISABLE_RATE_LIMITS') + >>> from proj.celery import app + >>> app.conf.get_by_parts('CELERY', 'DISABLE_RATE_LIMITS') False """ return self['_'.join(part for part in parts if part)] - def humanize(self): - """Returns a human readable string showing changes to the + def table(self, with_defaults=False, censored=True): + filt = filter_hidden_settings if censored else lambda v: v + return filt(dict( + (k, v) for k, v in items( + self if with_defaults else self.without_defaults()) + if k.isupper() and not k.startswith('_') + )) + + def humanize(self, with_defaults=False, censored=True): + """Return a human readable string showing changes to the configuration.""" return '\n'.join( - '%s: %s' % (key, pretty(value, width=50)) - for key, value in filter_hidden_settings(dict( - (k, v) for k, v in self.without_defaults().iteritems() - if k.isupper() and not k.startswith('_'))).iteritems()) + '{0}: {1}'.format(key, pretty(value, width=50)) + for key, value in items(self.table(with_defaults, censored))) class AppPickler(object): - """Default application pickler/unpickler.""" + """Old application pickler/unpickler (< 3.1).""" def __call__(self, cls, *args): kwargs = self.build_kwargs(*args) @@ -137,42 +158,56 @@ class AppPickler(object): def _unpickle_app(cls, pickler, *args): + """Rebuild app for versions 2.5+""" return pickler()(cls, *args) +def _unpickle_app_v2(cls, kwargs): + """Rebuild app for versions 3.1+""" + kwargs['set_as_current'] = False + return cls(**kwargs) + + def filter_hidden_settings(conf): - def maybe_censor(key, value): - return '********' if HIDDEN_SETTINGS.search(key) else value + def maybe_censor(key, value, mask='*' * 8): + if isinstance(value, Mapping): + return filter_hidden_settings(value) + if isinstance(value, string_t) and HIDDEN_SETTINGS.search(key): + return mask + if 'BROKER_URL' in key.upper(): + from kombu import Connection + return Connection(value).as_uri(mask=mask) + return value - return dict((k, maybe_censor(k, v)) for k, v in conf.iteritems()) + return dict((k, maybe_censor(k, v)) for k, v in items(conf)) def bugreport(app): - """Returns a string containing information useful in bug reports.""" + """Return a string containing information useful in bug reports.""" import billiard import celery import kombu try: conn = app.connection() - driver_v = '%s:%s' % (conn.transport.driver_name, - conn.transport.driver_version()) + driver_v = '{0}:{1}'.format(conn.transport.driver_name, + conn.transport.driver_version()) transport = conn.transport_cls except Exception: transport = driver_v = '' - return BUGREPORT_INFO % { - 'system': _platform.system(), - 'arch': ', '.join(p for p in _platform.architecture() if p), - 'py_i': platforms.pyimplementation(), - 'celery_v': celery.VERSION_BANNER, - 'kombu_v': kombu.__version__, - 'billiard_v': billiard.__version__, - 'py_v': _platform.python_version(), - 'driver_v': driver_v, - 'transport': transport, - 'results': app.conf.CELERY_RESULT_BACKEND or 'disabled', - 'human_settings': app.conf.humanize(), - 'loader': qualname(app.loader.__class__), - } + return BUGREPORT_INFO.format( + system=_platform.system(), + arch=', '.join(x for x in _platform.architecture() if x), + py_i=pyimplementation(), + celery_v=celery.VERSION_BANNER, + kombu_v=kombu.__version__, + billiard_v=billiard.__version__, + py_v=_platform.python_version(), + driver_v=driver_v, + transport=transport, + results=app.conf.CELERY_RESULT_BACKEND or 'disabled', + human_settings=app.conf.humanize(), + 
loader=qualname(app.loader.__class__), + ) diff --git a/awx/lib/site-packages/celery/apps/beat.py b/awx/lib/site-packages/celery/apps/beat.py index 8e1ac22f41..a2ca02234f 100644 --- a/awx/lib/site-packages/celery/apps/beat.py +++ b/awx/lib/site-packages/celery/apps/beat.py @@ -16,41 +16,46 @@ import socket import sys from celery import VERSION_BANNER, platforms, beat -from celery.app import app_or_default -from celery.app.abstract import configurated, from_config from celery.utils.imports import qualname from celery.utils.log import LOG_LEVELS, get_logger from celery.utils.timeutils import humanize_seconds +__all__ = ['Beat'] + STARTUP_INFO_FMT = """ Configuration -> - . broker -> %(conninfo)s - . loader -> %(loader)s - . scheduler -> %(scheduler)s -%(scheduler_info)s - . logfile -> %(logfile)s@%(loglevel)s - . maxinterval -> %(hmax_interval)s (%(max_interval)ss) + . broker -> {conninfo} + . loader -> {loader} + . scheduler -> {scheduler} +{scheduler_info} + . logfile -> {logfile}@%{loglevel} + . maxinterval -> {hmax_interval} ({max_interval}s) """.strip() logger = get_logger('celery.beat') -class Beat(configurated): +class Beat(object): Service = beat.Service - app = None - loglevel = from_config('log_level') - logfile = from_config('log_file') - schedule = from_config('schedule_filename') - scheduler_cls = from_config('scheduler') - redirect_stdouts = from_config() - redirect_stdouts_level = from_config() def __init__(self, max_interval=None, app=None, - socket_timeout=30, pidfile=None, no_color=None, **kwargs): - """Starts the celerybeat task scheduler.""" - self.app = app = app_or_default(app or self.app) - self.setup_defaults(kwargs, namespace='celerybeat') + socket_timeout=30, pidfile=None, no_color=None, + loglevel=None, logfile=None, schedule=None, + scheduler_cls=None, redirect_stdouts=None, + redirect_stdouts_level=None, **kwargs): + """Starts the beat task scheduler.""" + self.app = app = app or self.app + self.loglevel = self._getopt('log_level', loglevel) + self.logfile = self._getopt('log_file', logfile) + self.schedule = self._getopt('schedule_filename', schedule) + self.scheduler_cls = self._getopt('scheduler', scheduler_cls) + self.redirect_stdouts = self._getopt( + 'redirect_stdouts', redirect_stdouts, + ) + self.redirect_stdouts_level = self._getopt( + 'redirect_stdouts_level', redirect_stdouts_level, + ) self.max_interval = max_interval self.socket_timeout = socket_timeout @@ -64,9 +69,14 @@ class Beat(configurated): if not isinstance(self.loglevel, int): self.loglevel = LOG_LEVELS[self.loglevel.upper()] + def _getopt(self, key, value): + if value is not None: + return value + return self.app.conf.find_value_for_key(key, namespace='celerybeat') + def run(self): print(str(self.colored.cyan( - 'celerybeat v%s is starting.' 
% VERSION_BANNER))) + 'celery beat v{0} is starting.'.format(VERSION_BANNER)))) self.init_loader() self.set_process_title() self.start_scheduler() @@ -99,8 +109,8 @@ class Beat(configurated): try: self.install_sync_handler(beat) beat.start() - except Exception, exc: - logger.critical('celerybeat raised exception %s: %r', + except Exception as exc: + logger.critical('beat raised exception %s: %r', exc.__class__, exc, exc_info=True) @@ -112,26 +122,26 @@ class Beat(configurated): def startup_info(self, beat): scheduler = beat.get_scheduler(lazy=True) - return STARTUP_INFO_FMT % { - 'conninfo': self.app.connection().as_uri(), - 'logfile': self.logfile or '[stderr]', - 'loglevel': LOG_LEVELS[self.loglevel], - 'loader': qualname(self.app.loader), - 'scheduler': qualname(scheduler), - 'scheduler_info': scheduler.info, - 'hmax_interval': humanize_seconds(beat.max_interval), - 'max_interval': beat.max_interval, - } + return STARTUP_INFO_FMT.format( + conninfo=self.app.connection().as_uri(), + logfile=self.logfile or '[stderr]', + loglevel=LOG_LEVELS[self.loglevel], + loader=qualname(self.app.loader), + scheduler=qualname(scheduler), + scheduler_info=scheduler.info, + hmax_interval=humanize_seconds(beat.max_interval), + max_interval=beat.max_interval, + ) def set_process_title(self): arg_start = 'manage' in sys.argv[0] and 2 or 1 platforms.set_process_title( - 'celerybeat', info=' '.join(sys.argv[arg_start:]), + 'celery beat', info=' '.join(sys.argv[arg_start:]), ) def install_sync_handler(self, beat): """Install a `SIGTERM` + `SIGINT` handler that saves - the celerybeat schedule.""" + the beat schedule.""" def _sync(signum, frame): beat.sync() diff --git a/awx/lib/site-packages/celery/apps/worker.py b/awx/lib/site-packages/celery/apps/worker.py index 2d6d67c19a..15b47eae8f 100644 --- a/awx/lib/site-packages/celery/apps/worker.py +++ b/awx/lib/site-packages/celery/apps/worker.py @@ -10,52 +10,81 @@ platform tweaks, and so on. 
""" -from __future__ import absolute_import +from __future__ import absolute_import, print_function import logging import os import platform as _platform -import socket import sys import warnings from functools import partial -from billiard import cpu_count, current_process +from billiard import current_process from kombu.utils.encoding import safe_str from celery import VERSION_BANNER, platforms, signals -from celery.app import app_or_default -from celery.app.abstract import configurated, from_config -from celery.exceptions import ImproperlyConfigured, SystemTerminate +from celery.exceptions import CDeprecationWarning, SystemTerminate +from celery.five import string, string_t from celery.loaders.app import AppLoader -from celery.task import trace -from celery.utils import cry, isatty, worker_direct +from celery.app import trace +from celery.utils import cry, isatty from celery.utils.imports import qualname -from celery.utils.log import get_logger, mlevel, set_in_sighandler +from celery.utils.log import get_logger, in_sighandler, set_in_sighandler from celery.utils.text import pluralize from celery.worker import WorkController -try: - from greenlet import GreenletExit - IGNORE_ERRORS = (GreenletExit, ) -except ImportError: # pragma: no cover - IGNORE_ERRORS = () +__all__ = ['Worker'] logger = get_logger(__name__) is_jython = sys.platform.startswith('java') is_pypy = hasattr(sys, 'pypy_version_info') +C_FORCE_ROOT = os.environ.get('C_FORCE_ROOT', False) + +ROOT_DISALLOWED = """\ +Running a worker with superuser privileges when the +worker accepts messages serialized with pickle is a very bad idea! + +If you really want to continue then you have to set the C_FORCE_ROOT +environment variable (but please think about this before you do). +""" + +ROOT_DISCOURAGED = """\ +You are running the worker with superuser privileges, which is +absolutely not recommended! + +Please specify a different user using the -u option. +""" + +W_PICKLE_DEPRECATED = """ +Starting from version 3.2 Celery will refuse to accept pickle by default. + +The pickle serializer is a security concern as it may give attackers +the ability to execute any command. It's important to secure +your broker from unauthorized access when using pickle, so we think +that enabling pickle should require a deliberate action and not be +the default choice. + +If you depend on pickle then you should set a setting to disable this +warning and to be sure that everything will continue working +when you upgrade to Celery 3.2:: + + CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml'] + +You must only enable the serializers that you will actually use. + +""" + def active_thread_count(): from threading import enumerate - # must use .getName on Python 2.5 return sum(1 for t in enumerate() - if not t.getName().startswith('Dummy-')) + if not t.name.startswith('Dummy-')) def safe_say(msg): - sys.__stderr__.write('\n%s\n' % msg) + print('\n{0}'.format(msg), file=sys.__stderr__) ARTLINES = [ ' --------------', @@ -73,149 +102,118 @@ ARTLINES = [ ] BANNER = """\ -celery@%(hostname)s v%(version)s +{hostname} v{version} -%(platform)s +{platform} [config] -.> broker: %(conninfo)s -.> app: %(app)s -.> concurrency: %(concurrency)s -.> events: %(events)s +.> broker: {conninfo} +.> app: {app} +.> concurrency: {concurrency} +.> events: {events} [queues] -%(queues)s +{queues} """ EXTRA_INFO_FMT = """ -[Tasks] -%(tasks)s -""" - -UNKNOWN_QUEUE = """\ -Trying to select queue subset of %r, but queue %s is not -defined in the CELERY_QUEUES setting. 
- -If you want to automatically declare unknown queues you can -enable the CELERY_CREATE_MISSING_QUEUES setting. +[tasks] +{tasks} """ -class Worker(configurated): - WorkController = WorkController +class Worker(WorkController): - app = None - inherit_confopts = (WorkController, ) - loglevel = from_config('log_level') - redirect_stdouts = from_config() - redirect_stdouts_level = from_config() - - def __init__(self, hostname=None, purge=False, beat=False, - queues=None, include=None, app=None, pidfile=None, - autoscale=None, autoreload=False, no_execv=False, - no_color=None, **kwargs): - self.app = app = app_or_default(app or self.app) - self.hostname = hostname or socket.gethostname() + def on_before_init(self, **kwargs): + trace.setup_worker_optimizations(self.app) # this signal can be used to set up configuration for # workers by name. - signals.celeryd_init.send(sender=self.hostname, instance=self, - conf=self.app.conf) + signals.celeryd_init.send( + sender=self.hostname, instance=self, + conf=self.app.conf, options=kwargs, + ) - self.setup_defaults(kwargs, namespace='celeryd') - if not self.concurrency: - try: - self.concurrency = cpu_count() - except NotImplementedError: - self.concurrency = 2 + def on_after_init(self, purge=False, no_color=None, + redirect_stdouts=None, redirect_stdouts_level=None, + **kwargs): + self.redirect_stdouts = self._getopt( + 'redirect_stdouts', redirect_stdouts, + ) + self.redirect_stdouts_level = self._getopt( + 'redirect_stdouts_level', redirect_stdouts_level, + ) + super(Worker, self).setup_defaults(**kwargs) self.purge = purge - self.beat = beat - self.use_queues = [] if queues is None else queues - self.queues = None - self.include = include - self.pidfile = pidfile - self.autoscale = None - self.autoreload = autoreload self.no_color = no_color - self.no_execv = no_execv - if autoscale: - max_c, _, min_c = autoscale.partition(',') - self.autoscale = [int(max_c), min_c and int(min_c) or 0] self._isatty = isatty(sys.stdout) - - self.colored = app.log.colored( + self.colored = self.app.log.colored( self.logfile, enabled=not no_color if no_color is not None else no_color ) - if isinstance(self.use_queues, basestring): - self.use_queues = self.use_queues.split(',') - if self.include: - if isinstance(self.include, basestring): - self.include = self.include.split(',') - app.conf.CELERY_INCLUDE = ( - tuple(app.conf.CELERY_INCLUDE) + tuple(self.include)) - self.loglevel = mlevel(self.loglevel) + def on_init_blueprint(self): + self._custom_logging = self.setup_logging() + # apply task execution optimizations + # -- This will finalize the app! + trace.setup_worker_optimizations(self.app) - def run(self): - self.init_queues() - self.app.loader.init_worker() + def on_start(self): + if not self._custom_logging and self.redirect_stdouts: + self.app.log.redirect_stdouts(self.redirect_stdouts_level) + + WorkController.on_start(self) # this signal can be used to e.g. change queues after # the -Q option has been applied. 
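The `celeryd_init` signal sent above now also receives the raw `options` mapping, and because it is fired with the node name as sender it can drive per-node configuration. A minimal sketch of that documented pattern, assuming a node named `worker1@example.com`::

    from celery.signals import celeryd_init

    @celeryd_init.connect(sender='worker1@example.com')
    def configure_worker1(conf=None, **kwargs):
        # adjust settings for this node only; conf is the app configuration
        conf.CELERY_DEFAULT_RATE_LIMIT = '10/m'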
- signals.celeryd_after_setup.send(sender=self.hostname, instance=self, - conf=self.app.conf) + signals.celeryd_after_setup.send( + sender=self.hostname, instance=self, conf=self.app.conf, + ) if getattr(os, 'getuid', None) and os.getuid() == 0: - warnings.warn(RuntimeWarning( - 'Running celeryd with superuser privileges is discouraged!')) + accept_encoding = self.app.conf.CELERY_ACCEPT_CONTENT + if ('pickle' in accept_encoding or + 'application/x-python-serialize' in accept_encoding): + if not C_FORCE_ROOT: + raise RuntimeError(ROOT_DISALLOWED) + warnings.warn(RuntimeWarning(ROOT_DISCOURAGED)) + + if not self.app.conf.value_set_for('CELERY_ACCEPT_CONTENT'): + warnings.warn(CDeprecationWarning(W_PICKLE_DEPRECATED)) if self.purge: self.purge_messages() # Dump configuration to screen so we have some basic information # for when users sends bug reports. - print(str(self.colored.cyan(' \n', self.startup_info())) + - str(self.colored.reset(self.extra_info() or ''))) + sys.__stdout__.write( + str(self.colored.cyan(' \n', self.startup_info())) + + str(self.colored.reset(self.extra_info() or '')) + '\n' + ) self.set_process_status('-active-') - - self.setup_logging() - - # apply task execution optimizations - trace.setup_worker_optimizations(self.app) - - try: - self.run_worker() - except IGNORE_ERRORS: - pass + self.install_platform_tweaks(self) def on_consumer_ready(self, consumer): signals.worker_ready.send(sender=consumer) - print('celery@%s ready.' % safe_str(self.hostname)) - - def init_queues(self): - try: - self.app.select_queues(self.use_queues) - except KeyError, exc: - raise ImproperlyConfigured(UNKNOWN_QUEUE % (self.use_queues, exc)) - if self.app.conf.CELERY_WORKER_DIRECT: - self.app.amqp.queues.select_add(worker_direct(self.hostname)) + print('{0} ready.'.format(safe_str(self.hostname), )) def setup_logging(self, colorize=None): if colorize is None and self.no_color is not None: colorize = not self.no_color - self.app.log.setup(self.loglevel, self.logfile, - self.redirect_stdouts, self.redirect_stdouts_level, - colorize=colorize) + return self.app.log.setup( + self.loglevel, self.logfile, + redirect_stdouts=False, colorize=colorize, + ) def purge_messages(self): count = self.app.control.purge() - print('purge: Erased %d %s from the queue.\n' % ( - count, pluralize(count, 'message'))) + if count: + print('purge: Erased {0} {1} from the queue.\n'.format( + count, pluralize(count, 'message'))) def tasklist(self, include_builtins=True, sep='\n', int_='celery.'): return sep.join( - ' . %s' % task for task in sorted(self.app.tasks) + ' . 
{0}'.format(task) for task in sorted(self.app.tasks) if (not task.startswith(int_) if not include_builtins else task) ) @@ -223,38 +221,38 @@ class Worker(configurated): if self.loglevel <= logging.INFO: include_builtins = self.loglevel <= logging.DEBUG tasklist = self.tasklist(include_builtins=include_builtins) - return EXTRA_INFO_FMT % {'tasks': tasklist} + return EXTRA_INFO_FMT.format(tasks=tasklist) def startup_info(self): app = self.app - concurrency = unicode(self.concurrency) - appr = '%s:0x%x' % (app.main or '__main__', id(app)) + concurrency = string(self.concurrency) + appr = '{0}:0x{1:x}'.format(app.main or '__main__', id(app)) if not isinstance(app.loader, AppLoader): loader = qualname(app.loader) if loader.startswith('celery.loaders'): loader = loader[14:] - appr += ' (%s)' % loader + appr += ' ({0})'.format(loader) if self.autoscale: max, min = self.autoscale - concurrency = '{min=%s, max=%s}' % (min, max) + concurrency = '{{min={0}, max={1}}}'.format(min, max) pool = self.pool_cls - if not isinstance(pool, basestring): + if not isinstance(pool, string_t): pool = pool.__module__ - concurrency += ' (%s)' % pool.split('.')[-1] + concurrency += ' ({0})'.format(pool.split('.')[-1]) events = 'ON' if not self.send_events: events = 'OFF (enable -E to monitor this worker)' - banner = (BANNER % { - 'app': appr, - 'hostname': self.hostname, - 'version': VERSION_BANNER, - 'conninfo': self.app.connection().as_uri(), - 'concurrency': concurrency, - 'platform': safe_str(_platform.platform()), - 'events': events, - 'queues': app.amqp.queues.format(indent=0, indent_first=False), - }).splitlines() + banner = BANNER.format( + app=appr, + hostname=safe_str(self.hostname), + version=VERSION_BANNER, + conninfo=self.app.connection().as_uri(), + concurrency=concurrency, + platform=safe_str(_platform.platform()), + events=events, + queues=app.amqp.queues.format(indent=0, indent_first=False), + ).splitlines() # integrate the ASCII art. for i, x in enumerate(banner): @@ -264,20 +262,6 @@ class Worker(configurated): banner[i] = ' ' * 16 + banner[i] return '\n'.join(banner) + '\n' - def run_worker(self): - worker = self.WorkController( - app=self.app, - hostname=self.hostname, - ready_callback=self.on_consumer_ready, beat=self.beat, - autoscale=self.autoscale, autoreload=self.autoreload, - no_execv=self.no_execv, - pidfile=self.pidfile, - **self.confopts_as_dict() - ) - self.install_platform_tweaks(worker) - signals.worker_init.send(sender=worker) - worker.start() - def install_platform_tweaks(self, worker): """Install platform specific tweaks and workarounds.""" if self.app.IS_OSX: @@ -286,7 +270,7 @@ class Worker(configurated): # Install signal handler so SIGHUP restarts the worker. if not self._isatty: # only install HUP handler if detached from terminal, - # so closing the terminal window doesn't restart celeryd + # so closing the terminal window doesn't restart the worker # into the background. if self.app.IS_OSX: # OS X can't exec from a process using threads. 
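One subtlety of the `%`-to-`str.format` migration visible in `startup_info()` above: literal braces in the autoscale display must now be doubled in the template::

    >>> '{{min={0}, max={1}}}'.format(0, 8)
    '{min=0, max=8}'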
@@ -307,7 +291,7 @@ class Worker(configurated): def set_process_status(self, info): return platforms.set_mp_process_title( 'celeryd', - info='%s (%s)' % (info, platforms.strargv(sys.argv)), + info='{0} ({1})'.format(info, platforms.strargv(sys.argv)), hostname=self.hostname, ) @@ -316,51 +300,44 @@ def _shutdown_handler(worker, sig='TERM', how='Warm', exc=SystemExit, callback=None): def _handle_request(*args): - set_in_sighandler(True) - try: + with in_sighandler(): from celery.worker import state if current_process()._name == 'MainProcess': if callback: callback(worker) - safe_say('celeryd: %s shutdown (MainProcess)' % how) + safe_say('worker: {0} shutdown (MainProcess)'.format(how)) if active_thread_count() > 1: setattr(state, {'Warm': 'should_stop', 'Cold': 'should_terminate'}[how], True) else: raise exc() - finally: - set_in_sighandler(False) _handle_request.__name__ = 'worker_' + how platforms.signals[sig] = _handle_request install_worker_term_handler = partial( _shutdown_handler, sig='SIGTERM', how='Warm', exc=SystemExit, ) -if not is_jython: +if not is_jython: # pragma: no cover install_worker_term_hard_handler = partial( _shutdown_handler, sig='SIGQUIT', how='Cold', exc=SystemTerminate, ) -else: +else: # pragma: no cover install_worker_term_handler = \ install_worker_term_hard_handler = lambda *a, **kw: None def on_SIGINT(worker): - safe_say('celeryd: Hitting Ctrl+C again will terminate all running tasks!') + safe_say('worker: Hitting Ctrl+C again will terminate all running tasks!') install_worker_term_hard_handler(worker, sig='SIGINT') -if not is_jython: +if not is_jython: # pragma: no cover install_worker_int_handler = partial( _shutdown_handler, sig='SIGINT', callback=on_SIGINT ) -else: +else: # pragma: no cover install_worker_int_handler = lambda *a, **kw: None -def _clone_current_worker(): - if os.fork() == 0: - platforms.close_open_fds([ - sys.__stdin__, sys.__stdout__, sys.__stderr__, - ]) - os.execv(sys.executable, [sys.executable] + sys.argv) +def _reload_current_worker(): + os.execv(sys.executable, [sys.executable] + sys.argv) def install_worker_restart_handler(worker, sig='SIGHUP'): @@ -368,27 +345,24 @@ def install_worker_restart_handler(worker, sig='SIGHUP'): def restart_worker_sig_handler(*args): """Signal handler restarting the current python program.""" set_in_sighandler(True) - safe_say('Restarting celeryd (%s)' % (' '.join(sys.argv), )) + safe_say('Restarting celery worker ({0})'.format(' '.join(sys.argv))) import atexit - atexit.register(_clone_current_worker) + atexit.register(_reload_current_worker) from celery.worker import state state.should_stop = True platforms.signals[sig] = restart_worker_sig_handler -def install_cry_handler(): +def install_cry_handler(sig='SIGUSR1'): # Jython/PyPy does not have sys._current_frames if is_jython or is_pypy: # pragma: no cover return def cry_handler(*args): """Signal handler logging the stacktrace of all active threads.""" - set_in_sighandler(True) - try: + with in_sighandler(): safe_say(cry()) - finally: - set_in_sighandler(False) - platforms.signals['SIGUSR1'] = cry_handler + platforms.signals[sig] = cry_handler def install_rdb_handler(envvar='CELERY_RDBSIG', @@ -396,24 +370,19 @@ def install_rdb_handler(envvar='CELERY_RDBSIG', def rdb_handler(*args): """Signal handler setting a rdb breakpoint at the current frame.""" - set_in_sighandler(True) - try: - _, frame = args - from celery.contrib import rdb - rdb.set_trace(frame) - finally: - set_in_sighandler(False) + with in_sighandler(): + from celery.contrib.rdb import 
set_trace, _frame + # gevent does not pass standard signal handler args + frame = args[1] if args else _frame().f_back + set_trace(frame) if os.environ.get(envvar): platforms.signals[sig] = rdb_handler def install_HUP_not_supported_handler(worker, sig='SIGHUP'): - def warn_on_HUP_handler(*args): - set_in_sighandler(True) - try: - safe_say('%(sig)s not supported: Restarting with %(sig)s is ' - 'unstable on this platform!' % {'sig': sig}) - finally: - set_in_sighandler(False) + def warn_on_HUP_handler(signum, frame): + with in_sighandler(): + safe_say('{sig} not supported: Restarting with {sig} is ' + 'unstable on this platform!'.format(sig=sig)) platforms.signals[sig] = warn_on_HUP_handler diff --git a/awx/lib/site-packages/celery/backends/__init__.py b/awx/lib/site-packages/celery/backends/__init__.py index 493827379b..421f7f480c 100644 --- a/awx/lib/site-packages/celery/backends/__init__.py +++ b/awx/lib/site-packages/celery/backends/__init__.py @@ -14,20 +14,26 @@ from kombu.utils.url import _parse_url from celery.local import Proxy from celery._state import current_app +from celery.five import reraise from celery.utils.imports import symbol_by_name from celery.utils.functional import memoize +__all__ = ['get_backend_cls', 'get_backend_by_url'] + UNKNOWN_BACKEND = """\ -Unknown result backend: %r. Did you spell that correctly? (%r)\ +Unknown result backend: {0!r}. Did you spell that correctly? ({1!r})\ """ BACKEND_ALIASES = { 'amqp': 'celery.backends.amqp:AMQPBackend', + 'rpc': 'celery.backends.rpc.RPCBackend', 'cache': 'celery.backends.cache:CacheBackend', 'redis': 'celery.backends.redis:RedisBackend', 'mongodb': 'celery.backends.mongodb:MongoBackend', + 'db': 'celery.backends.database:DatabaseBackend', 'database': 'celery.backends.database:DatabaseBackend', 'cassandra': 'celery.backends.cassandra:CassandraBackend', + 'couchbase': 'celery.backends.couchbase:CouchBaseBackend', 'disabled': 'celery.backends.base:DisabledBackend', } @@ -43,14 +49,17 @@ def get_backend_cls(backend=None, loader=None): aliases = dict(BACKEND_ALIASES, **loader.override_backends) try: return symbol_by_name(backend, aliases) - except ValueError, exc: - raise ValueError, ValueError(UNKNOWN_BACKEND % ( - backend, exc)), sys.exc_info()[2] + except ValueError as exc: + reraise(ValueError, ValueError(UNKNOWN_BACKEND.format( + backend, exc)), sys.exc_info()[2]) def get_backend_by_url(backend=None, loader=None): url = None if backend and '://' in backend: url = backend - backend, _, _, _, _, _, _ = _parse_url(url) + if '+' in url[:url.index('://')]: + backend, url = url.split('+', 1) + else: + backend, _, _, _, _, _, _ = _parse_url(url) return get_backend_cls(backend, loader), url diff --git a/awx/lib/site-packages/celery/backends/amqp.py b/awx/lib/site-packages/celery/backends/amqp.py index 7a6154f0c2..cc07f55361 100644 --- a/awx/lib/site-packages/celery/backends/amqp.py +++ b/awx/lib/site-packages/celery/backends/amqp.py @@ -9,19 +9,24 @@ """ from __future__ import absolute_import -from __future__ import with_statement import socket -import threading -import time + +from collections import deque +from operator import itemgetter from kombu import Exchange, Queue, Producer, Consumer from celery import states from celery.exceptions import TimeoutError +from celery.five import range, monotonic +from celery.utils.functional import dictfilter from celery.utils.log import get_logger +from celery.utils.timeutils import maybe_s_to_ms -from .base import BaseDictBackend +from .base import BaseBackend + +__all__ = 
['BacklogLimitExceeded', 'AMQPBackend'] logger = get_logger(__name__) @@ -37,10 +42,14 @@ def repair_uuid(s): return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:]) -class AMQPBackend(BaseDictBackend): +class NoCacheQueue(Queue): + can_cache_declaration = False + + +class AMQPBackend(BaseBackend): """Publishes results by sending messages.""" Exchange = Exchange - Queue = Queue + Queue = NoCacheQueue Consumer = Consumer Producer = Producer @@ -56,43 +65,37 @@ class AMQPBackend(BaseDictBackend): 'interval_max': 1, } - def __init__(self, connection=None, exchange=None, exchange_type=None, - persistent=None, serializer=None, auto_delete=True, - **kwargs): - super(AMQPBackend, self).__init__(**kwargs) + def __init__(self, app, connection=None, exchange=None, exchange_type=None, + persistent=None, serializer=None, auto_delete=True, **kwargs): + super(AMQPBackend, self).__init__(app, **kwargs) conf = self.app.conf self._connection = connection - self.queue_arguments = {} - self.persistent = (conf.CELERY_RESULT_PERSISTENT if persistent is None - else persistent) - delivery_mode = persistent and 'persistent' or 'transient' + self.persistent = self.prepare_persistent(persistent) + self.delivery_mode = 2 if self.persistent else 1 exchange = exchange or conf.CELERY_RESULT_EXCHANGE exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE - self.exchange = self.Exchange(name=exchange, - type=exchange_type, - delivery_mode=delivery_mode, - durable=self.persistent, - auto_delete=False) + self.exchange = self._create_exchange( + exchange, exchange_type, self.delivery_mode, + ) self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER self.auto_delete = auto_delete - # AMQP_TASK_RESULT_EXPIRES setting is deprecated and will be - # removed in version 4.0. 
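With the deprecated `CELERY_AMQP_TASK_RESULT_EXPIRES` setting removed below, the result-queue TTL is derived from the single `expires` value: seconds become milliseconds for the queue's `x-expires` argument, and the argument is dropped entirely when no expiry is configured. A rough equivalent of the new logic, assuming `maybe_s_to_ms()` maps `None` to `None` and `dictfilter()` discards `None` values::

    def result_queue_arguments(expires_seconds):
        # mirrors dictfilter({'x-expires': maybe_s_to_ms(expires)}):
        # omit x-expires when no expiry is configured
        if expires_seconds is None:
            return {}
        return {'x-expires': int(expires_seconds * 1000.0)}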
- dexpires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES - self.expires = None - if 'expires' in kwargs: - if kwargs['expires'] is not None: - self.expires = self.prepare_expires(kwargs['expires']) - else: - self.expires = self.prepare_expires(dexpires) + if 'expires' not in kwargs or kwargs['expires'] is not None: + self.expires = self.prepare_expires(kwargs.get('expires')) + self.queue_arguments = dictfilter({ + 'x-expires': maybe_s_to_ms(self.expires), + }) - if self.expires: - self.queue_arguments['x-expires'] = int(self.expires * 1000) - self.mutex = threading.Lock() + def _create_exchange(self, name, type='direct', delivery_mode=2): + return self.Exchange(name=name, + type=type, + delivery_mode=delivery_mode, + durable=self.persistent, + auto_delete=False) def _create_binding(self, task_id): - name = task_id.replace('-', '') + name = self.rkey(task_id) return self.Queue(name=name, exchange=self.exchange, routing_key=name, @@ -103,39 +106,46 @@ class AMQPBackend(BaseDictBackend): def revive(self, channel): pass - def _republish(self, channel, task_id, body, content_type, - content_encoding): - return Producer(channel).publish( - body, - exchange=self.exchange, - routing_key=task_id.replace('-', ''), - serializer=self.serializer, - content_type=content_type, - content_encoding=content_encoding, - retry=True, retry_policy=self.retry_policy, - declare=[self._create_binding(task_id)], - ) + def rkey(self, task_id): + return task_id.replace('-', '') - def _store_result(self, task_id, result, status, traceback=None): + def destination_for(self, task_id, request): + if request: + return self.rkey(task_id), request.correlation_id or task_id + return self.rkey(task_id), task_id + + def store_result(self, task_id, result, status, + traceback=None, request=None, **kwargs): """Send task return value and status.""" - with self.mutex: - with self.app.amqp.producer_pool.acquire(block=True) as pub: - pub.publish({'task_id': task_id, 'status': status, - 'result': self.encode_result(result, status), - 'traceback': traceback, - 'children': self.current_task_children()}, - exchange=self.exchange, - routing_key=task_id.replace('-', ''), - serializer=self.serializer, - retry=True, retry_policy=self.retry_policy, - declare=[self._create_binding(task_id)]) + routing_key, correlation_id = self.destination_for(task_id, request) + if not routing_key: + return + with self.app.amqp.producer_pool.acquire(block=True) as producer: + producer.publish( + {'task_id': task_id, 'status': status, + 'result': self.encode_result(result, status), + 'traceback': traceback, + 'children': self.current_task_children(request)}, + exchange=self.exchange, + routing_key=routing_key, + correlation_id=correlation_id, + serializer=self.serializer, + retry=True, retry_policy=self.retry_policy, + declare=self.on_reply_declare(task_id), + delivery_mode=self.delivery_mode, + ) return result + def on_reply_declare(self, task_id): + return [self._create_binding(task_id)] + def wait_for(self, task_id, timeout=None, cache=True, propagate=True, + READY_STATES=states.READY_STATES, + PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs): cached_meta = self._cache.get(task_id) if cache and cached_meta and \ - cached_meta['status'] in states.READY_STATES: + cached_meta['status'] in READY_STATES: meta = cached_meta else: try: @@ -143,24 +153,22 @@ class AMQPBackend(BaseDictBackend): except socket.timeout: raise TimeoutError('The operation timed out.') - state = meta['status'] - if state == states.SUCCESS: - return meta['result'] - elif state in 
states.PROPAGATE_STATES: - if propagate: - raise self.exception_to_python(meta['result']) - return meta['result'] - else: - return self.wait_for(task_id, timeout, cache) + if meta['status'] in PROPAGATE_STATES and propagate: + raise self.exception_to_python(meta['result']) + # consume() always returns READY_STATE. + return meta['result'] def get_task_meta(self, task_id, backlog_limit=1000): # Polling and using basic_get with self.app.pool.acquire_channel(block=True) as (_, channel): binding = self._create_binding(task_id)(channel) binding.declare() + prev = latest = acc = None - for i in xrange(backlog_limit): # spool ffwd - prev, latest, acc = latest, acc, binding.get(no_ack=False) + for i in range(backlog_limit): # spool ffwd + prev, latest, acc = latest, acc, binding.get( + accept=self.accept, no_ack=False, + ) if not acc: # no more messages break if prev: @@ -183,14 +191,14 @@ class AMQPBackend(BaseDictBackend): return {'status': states.PENDING, 'result': None} poll = get_task_meta # XXX compat - def drain_events(self, connection, consumer, timeout=None, now=time.time): - wait = connection.drain_events + def drain_events(self, connection, consumer, + timeout=None, now=monotonic, wait=None): + wait = wait or connection.drain_events results = {} def callback(meta, message): if meta['status'] in states.READY_STATES: - uuid = repair_uuid(message.delivery_info['routing_key']) - results[uuid] = meta + results[meta['task_id']] = meta consumer.callbacks[:] = [callback] time_start = now() @@ -206,33 +214,65 @@ class AMQPBackend(BaseDictBackend): return results def consume(self, task_id, timeout=None): + wait = self.drain_events with self.app.pool.acquire_channel(block=True) as (conn, channel): binding = self._create_binding(task_id) - with self.Consumer(channel, binding, no_ack=True) as consumer: - return self.drain_events(conn, consumer, timeout).values()[0] + with self.Consumer(channel, binding, + no_ack=True, accept=self.accept) as consumer: + while 1: + try: + return wait(conn, consumer, timeout)[task_id] + except KeyError: + continue - def get_many(self, task_ids, timeout=None, **kwargs): + def _many_bindings(self, ids): + return [self._create_binding(task_id) for task_id in ids] + + def get_many(self, task_ids, timeout=None, + now=monotonic, getfields=itemgetter('status', 'task_id'), + READY_STATES=states.READY_STATES, + PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs): with self.app.pool.acquire_channel(block=True) as (conn, channel): ids = set(task_ids) cached_ids = set() + mark_cached = cached_ids.add for task_id in ids: try: cached = self._cache[task_id] except KeyError: pass else: - if cached['status'] in states.READY_STATES: + if cached['status'] in READY_STATES: yield task_id, cached - cached_ids.add(task_id) - ids ^= cached_ids + mark_cached(task_id) + ids.difference_update(cached_ids) + results = deque() + push_result = results.append + push_cache = self._cache.__setitem__ + to_exception = self.exception_to_python - bindings = [self._create_binding(task_id) for task_id in task_ids] - with self.Consumer(channel, bindings, no_ack=True) as consumer: + def on_message(message): + body = message.decode() + state, uid = getfields(body) + if state in READY_STATES: + if state in PROPAGATE_STATES: + body['result'] = to_exception(body['result']) + push_result(body) \ + if uid in task_ids else push_cache(uid, body) + + bindings = self._many_bindings(task_ids) + with self.Consumer(channel, bindings, on_message=on_message, + accept=self.accept, no_ack=True): + wait = conn.drain_events + 
popleft = results.popleft while ids: - r = self.drain_events(conn, consumer, timeout) - ids ^= set(r) - for ready_id, ready_meta in r.iteritems(): - yield ready_id, ready_meta + wait(timeout=timeout) + while results: + state = popleft() + task_id = state['task_id'] + ids.discard(task_id) + push_cache(task_id, state) + yield task_id, state def reload_task_result(self, task_id): raise NotImplementedError( diff --git a/awx/lib/site-packages/celery/backends/base.py b/awx/lib/site-packages/celery/backends/base.py index 888bfe4be1..c9a028ed16 100644 --- a/awx/lib/site-packages/celery/backends/base.py +++ b/awx/lib/site-packages/celery/backends/base.py @@ -7,8 +7,6 @@ - :class:`BaseBackend` defines the interface. - - :class:`BaseDictBackend` assumes the fields are stored in a dict. - - :class:`KeyValueStoreBackend` is a common base class using K/V semantics like _get and _put. @@ -21,32 +19,38 @@ import sys from datetime import timedelta from billiard.einfo import ExceptionInfo -from kombu import serialization +from kombu.serialization import ( + dumps, loads, prepare_accept_content, + registry as serializer_registry, +) from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8 from celery import states from celery.app import current_task -from celery.datastructures import LRUCache -from celery.exceptions import ChordError, TaskRevokedError, TimeoutError -from celery.result import from_serializable, GroupResult +from celery.exceptions import ChordError, TimeoutError, TaskRevokedError +from celery.five import items +from celery.result import result_from_tuple, GroupResult from celery.utils import timeutils +from celery.utils.functional import LRUCache from celery.utils.serialization import ( get_pickled_exception, get_pickleable_exception, create_exception_cls, ) +__all__ = ['BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend'] + EXCEPTION_ABLE_CODECS = frozenset(['pickle', 'yaml']) -is_py3k = sys.version_info >= (3, 0) +PY3 = sys.version_info >= (3, 0) def unpickle_backend(cls, args, kwargs): - """Returns an unpickled backend.""" - return cls(*args, **kwargs) + """Return an unpickled backend.""" + from celery import current_app + return cls(*args, app=current_app._get_current_object(), **kwargs) class BaseBackend(object): - """Base backend class.""" READY_STATES = states.READY_STATES UNREADY_STATES = states.UNREADY_STATES EXCEPTION_STATES = states.EXCEPTION_STATES @@ -66,57 +70,37 @@ class BaseBackend(object): #: in this case. supports_autoexpire = False - def __init__(self, *args, **kwargs): - from celery.app import app_or_default - self.app = app_or_default(kwargs.get('app')) - self.serializer = kwargs.get('serializer', - self.app.conf.CELERY_RESULT_SERIALIZER) + #: Set to true if the backend is peristent by default. 
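As the constructor that follows shows, backends are now built around an explicit `app` argument and resolve their serializer and accepted content types from its configuration. A hedged sketch of wiring one up by hand (normally the app does this internally; the `redis://` URL is illustrative and assumes the redis client library is installed)::

    from celery import Celery
    from celery.backends import get_backend_by_url

    app = Celery('proj')
    # resolve the backend class and remaining url from one setting value
    cls, url = get_backend_by_url('redis://localhost:6379/1', app.loader)
    backend = cls(app=app, url=url)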
+ persistent = True + + def __init__(self, app, serializer=None, + max_cached_results=None, accept=None, **kwargs): + self.app = app + conf = self.app.conf + self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER (self.content_type, self.content_encoding, - self.encoder) = serialization.registry._encoders[self.serializer] - - def encode(self, data): - _, _, payload = serialization.encode(data, serializer=self.serializer) - return payload - - def decode(self, payload): - payload = is_py3k and payload or str(payload) - return serialization.decode(payload, - content_type=self.content_type, - content_encoding=self.content_encoding) - - def prepare_expires(self, value, type=None): - if value is None: - value = self.app.conf.CELERY_TASK_RESULT_EXPIRES - if isinstance(value, timedelta): - value = timeutils.timedelta_seconds(value) - if value is not None and type: - return type(value) - return value - - def encode_result(self, result, status): - if status in self.EXCEPTION_STATES and isinstance(result, Exception): - return self.prepare_exception(result) - else: - return self.prepare_value(result) - - def store_result(self, task_id, result, status, traceback=None): - """Store the result and status of a task.""" - raise NotImplementedError( - 'store_result is not supported by this backend.') + self.encoder) = serializer_registry._encoders[self.serializer] + self._cache = LRUCache( + limit=max_cached_results or conf.CELERY_MAX_CACHED_RESULTS, + ) + self.accept = prepare_accept_content( + conf.CELERY_ACCEPT_CONTENT if accept is None else accept, + ) def mark_as_started(self, task_id, **meta): """Mark a task as started""" return self.store_result(task_id, meta, status=states.STARTED) - def mark_as_done(self, task_id, result): + def mark_as_done(self, task_id, result, request=None): """Mark task as successfully executed.""" - return self.store_result(task_id, result, status=states.SUCCESS) + return self.store_result(task_id, result, + status=states.SUCCESS, request=request) - def mark_as_failure(self, task_id, exc, traceback=None): + def mark_as_failure(self, task_id, exc, traceback=None, request=None): """Mark task as executed with failure. Stores the execption.""" return self.store_result(task_id, exc, status=states.FAILURE, - traceback=traceback) + traceback=traceback, request=request) def fail_from_current_stack(self, task_id, exc=None): type_, real_exc, tb = sys.exc_info() @@ -128,15 +112,16 @@ class BaseBackend(object): finally: del(tb) - def mark_as_retry(self, task_id, exc, traceback=None): + def mark_as_retry(self, task_id, exc, traceback=None, request=None): """Mark task as being retries. 
Stores the current exception (if any).""" return self.store_result(task_id, exc, status=states.RETRY, - traceback=traceback) + traceback=traceback, request=request) - def mark_as_revoked(self, task_id, reason=''): + def mark_as_revoked(self, task_id, reason='', request=None): return self.store_result(task_id, TaskRevokedError(reason), - status=states.REVOKED, traceback=None) + status=states.REVOKED, traceback=None, + request=request) def prepare_exception(self, exc): """Prepare exception for serialization.""" @@ -148,18 +133,25 @@ class BaseBackend(object): """Convert serialized exception to Python exception.""" if self.serializer in EXCEPTION_ABLE_CODECS: return get_pickled_exception(exc) - return create_exception_cls(from_utf8(exc['exc_type']), - sys.modules[__name__])(exc['exc_message']) + return create_exception_cls( + from_utf8(exc['exc_type']), __name__)(exc['exc_message']) def prepare_value(self, result): """Prepare value for storage.""" if isinstance(result, GroupResult): - return result.serializable() + return result.as_tuple() return result - def forget(self, task_id): - raise NotImplementedError('%s does not implement forget.' % ( - self.__class__)) + def encode(self, data): + _, _, payload = dumps(data, serializer=self.serializer) + return payload + + def decode(self, payload): + payload = PY3 and payload or str(payload) + return loads(payload, + content_type=self.content_type, + content_encoding=self.content_encoding, + accept=self.accept) def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5): """Wait for task and return its result. @@ -190,96 +182,36 @@ class BaseBackend(object): if timeout and time_elapsed >= timeout: raise TimeoutError('The operation timed out.') - def cleanup(self): - """Backend cleanup. Is run by - :class:`celery.task.DeleteExpiredTaskMetaTask`.""" - pass + def prepare_expires(self, value, type=None): + if value is None: + value = self.app.conf.CELERY_TASK_RESULT_EXPIRES + if isinstance(value, timedelta): + value = timeutils.timedelta_seconds(value) + if value is not None and type: + return type(value) + return value - def process_cleanup(self): - """Cleanup actions to do at the end of a task worker process.""" - pass + def prepare_persistent(self, enabled=None): + if enabled is not None: + return enabled + p = self.app.conf.CELERY_RESULT_PERSISTENT + return self.persistent if p is None else p - def get_status(self, task_id): - """Get the status of a task.""" - raise NotImplementedError( - 'get_status is not supported by this backend.') - - def get_result(self, task_id): - """Get the result of a task.""" - raise NotImplementedError( - 'get_result is not supported by this backend.') - - def get_children(self, task_id): - raise NotImplementedError( - 'get_children is not supported by this backend.') - - def get_traceback(self, task_id): - """Get the traceback for a failed task.""" - raise NotImplementedError( - 'get_traceback is not supported by this backend.') - - def save_group(self, group_id, result): - """Store the result and status of a task.""" - - raise NotImplementedError( - 'save_group is not supported by %s.' 
% (type(self).__name__, )) - - def restore_group(self, group_id, cache=True): - """Get the result of a group.""" - raise NotImplementedError( - 'restore_group is not supported by this backend.') - - def delete_group(self, group_id): - raise NotImplementedError( - 'delete_group is not supported by this backend.') - - def reload_task_result(self, task_id): - """Reload task result, even if it has been previously fetched.""" - raise NotImplementedError( - 'reload_task_result is not supported by this backend.') - - def reload_group_result(self, task_id): - """Reload group result, even if it has been previously fetched.""" - raise NotImplementedError( - 'reload_group_result is not supported by this backend.') - - def on_chord_part_return(self, task, propagate=True): - pass - - def fallback_chord_unlock(self, group_id, body, result=None, - countdown=1, **kwargs): - kwargs['result'] = [r.id for r in result] - self.app.tasks['celery.chord_unlock'].apply_async( - (group_id, body, ), kwargs, countdown=countdown, - ) - on_chord_apply = fallback_chord_unlock - - def current_task_children(self): - current = current_task() - if current: - return [r.serializable() for r in current.request.children] - - def __reduce__(self, args=(), kwargs={}): - return (unpickle_backend, (self.__class__, args, kwargs)) - - def is_cached(self, task_id): - return False - - -class BaseDictBackend(BaseBackend): - - def __init__(self, *args, **kwargs): - super(BaseDictBackend, self).__init__(*args, **kwargs) - self._cache = LRUCache(limit=kwargs.get('max_cached_results') or - self.app.conf.CELERY_MAX_CACHED_RESULTS) + def encode_result(self, result, status): + if status in self.EXCEPTION_STATES and isinstance(result, Exception): + return self.prepare_exception(result) + else: + return self.prepare_value(result) def is_cached(self, task_id): return task_id in self._cache - def store_result(self, task_id, result, status, traceback=None, **kwargs): - """Store task result and status.""" + def store_result(self, task_id, result, status, + traceback=None, request=None, **kwargs): + """Update task state and result.""" result = self.encode_result(result, status) - self._store_result(task_id, result, status, traceback, **kwargs) + self._store_result(task_id, result, status, traceback, + request=request, **kwargs) return result def forget(self, task_id): @@ -287,8 +219,7 @@ class BaseDictBackend(BaseBackend): self._forget(task_id) def _forget(self, task_id): - raise NotImplementedError('%s does not implement forget.' % ( - self.__class__)) + raise NotImplementedError('backend does not implement forget.') def get_status(self, task_id): """Get the status of a task.""" @@ -326,11 +257,12 @@ class BaseDictBackend(BaseBackend): return meta def reload_task_result(self, task_id): + """Reload task result, even if it has been previously fetched.""" self._cache[task_id] = self.get_task_meta(task_id, cache=False) def reload_group_result(self, group_id): - self._cache[group_id] = self.get_group_meta(group_id, - cache=False) + """Reload group result, even if it has been previously fetched.""" + self._cache[group_id] = self.get_group_meta(group_id, cache=False) def get_group_meta(self, group_id, cache=True): if cache: @@ -358,8 +290,40 @@ class BaseDictBackend(BaseBackend): self._cache.pop(group_id, None) return self._delete_group(group_id) + def cleanup(self): + """Backend cleanup. 
Is run by + :class:`celery.task.DeleteExpiredTaskMetaTask`.""" + pass -class KeyValueStoreBackend(BaseDictBackend): + def process_cleanup(self): + """Cleanup actions to do at the end of a task worker process.""" + pass + + def on_task_call(self, producer, task_id): + return {} + + def on_chord_part_return(self, task, propagate=False): + pass + + def fallback_chord_unlock(self, group_id, body, result=None, + countdown=1, **kwargs): + kwargs['result'] = [r.as_tuple() for r in result] + self.app.tasks['celery.chord_unlock'].apply_async( + (group_id, body, ), kwargs, countdown=countdown, + ) + on_chord_apply = fallback_chord_unlock + + def current_task_children(self, request=None): + request = request or getattr(current_task(), 'request', None) + if request: + return [r.as_tuple() for r in getattr(request, 'children', [])] + + def __reduce__(self, args=(), kwargs={}): + return (unpickle_backend, (self.__class__, args, kwargs)) +BaseDictBackend = BaseBackend # XXX compat + + +class KeyValueStoreBackend(BaseBackend): task_keyprefix = ensure_bytes('celery-task-meta-') group_keyprefix = ensure_bytes('celery-taskset-meta-') chord_keyprefix = ensure_bytes('chord-unlock-') @@ -407,7 +371,7 @@ class KeyValueStoreBackend(BaseDictBackend): if hasattr(values, 'items'): # client returns dict so mapping preserved. return dict((self._strip_prefix(k), self.decode(v)) - for k, v in values.iteritems() + for k, v in items(values) if v is not None) else: # client returns list so need to recreate mapping. @@ -415,46 +379,50 @@ class KeyValueStoreBackend(BaseDictBackend): for i, value in enumerate(values) if value is not None) - def get_many(self, task_ids, timeout=None, interval=0.5): - ids = set(task_ids) + def get_many(self, task_ids, timeout=None, interval=0.5, + READY_STATES=states.READY_STATES): + interval = 0.5 if interval is None else interval + ids = task_ids if isinstance(task_ids, set) else set(task_ids) cached_ids = set() + cache = self._cache for task_id in ids: try: - cached = self._cache[task_id] + cached = cache[task_id] except KeyError: pass else: - if cached['status'] in states.READY_STATES: + if cached['status'] in READY_STATES: yield bytes_to_str(task_id), cached cached_ids.add(task_id) - ids ^= cached_ids + ids.difference_update(cached_ids) iterations = 0 while ids: keys = list(ids) r = self._mget_to_results(self.mget([self.get_key_for_task(k) for k in keys]), keys) - self._cache.update(r) - ids ^= set(bytes_to_str(v) for v in r) - for key, value in r.iteritems(): + cache.update(r) + ids.difference_update(set(bytes_to_str(v) for v in r)) + for key, value in items(r): yield bytes_to_str(key), value if timeout and iterations * interval >= timeout: - raise TimeoutError('Operation timed out (%s)' % (timeout, )) + raise TimeoutError('Operation timed out ({0})'.format(timeout)) time.sleep(interval) # don't busy loop. 
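The polling loop above is the generic shape shared by the key/value backends: fetch the still-pending ids in one `mget`, yield whatever became ready, then sleep so the store is not busy-looped, giving up once `iterations * interval` passes the timeout. Distilled into a standalone sketch, where `fetch_ready(pending)` returns a list of completed `(task_id, meta)` pairs and `PollTimeout` is a local stand-in for `celery.exceptions.TimeoutError`::

    import time

    class PollTimeout(Exception):
        """Local stand-in for celery.exceptions.TimeoutError."""

    def poll_until(fetch_ready, pending, timeout=None, interval=0.5):
        iterations = 0
        while pending:
            for task_id, meta in fetch_ready(pending):
                pending.discard(task_id)
                yield task_id, meta
            if timeout and iterations * interval >= timeout:
                raise PollTimeout('Operation timed out ({0})'.format(timeout))
            time.sleep(interval)  # don't busy loop
            iterations += 1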
iterations += 1 def _forget(self, task_id): self.delete(self.get_key_for_task(task_id)) - def _store_result(self, task_id, result, status, traceback=None): + def _store_result(self, task_id, result, status, + traceback=None, request=None, **kwargs): meta = {'status': status, 'result': result, 'traceback': traceback, - 'children': self.current_task_children()} + 'children': self.current_task_children(request)} self.set(self.get_key_for_task(task_id), self.encode(meta)) return result def _save_group(self, group_id, result): self.set(self.get_key_for_group(group_id), - self.encode({'result': result.serializable()})) + self.encode({'result': result.as_tuple()})) return result def _delete_group(self, group_id): @@ -476,8 +444,7 @@ class KeyValueStoreBackend(BaseDictBackend): if meta: meta = self.decode(meta) result = meta['result'] - if isinstance(result, (list, tuple)): - return {'result': from_serializable(result, self.app)} + meta['result'] = result_from_tuple(result, self.app) return meta def on_chord_apply(self, group_id, body, result=None, **kwargs): @@ -489,7 +456,7 @@ class KeyValueStoreBackend(BaseDictBackend): def on_chord_part_return(self, task, propagate=None): if not self.implements_incr: return - from celery import subtask + from celery import maybe_signature from celery.result import GroupResult app = self.app if propagate is None: @@ -498,29 +465,51 @@ class KeyValueStoreBackend(BaseDictBackend): if not gid: return key = self.get_key_for_chord(gid) - deps = GroupResult.restore(gid, backend=task.backend) + try: + deps = GroupResult.restore(gid, backend=task.backend) + except Exception as exc: + callback = maybe_signature(task.request.chord, app=self.app) + return app._tasks[callback.task].backend.fail_from_current_stack( + callback.id, + exc=ChordError('Cannot restore group: {0!r}'.format(exc)), + ) + if deps is None: + try: + raise ValueError(gid) + except ValueError as exc: + callback = maybe_signature(task.request.chord, app=self.app) + task = app._tasks[callback.task] + return task.backend.fail_from_current_stack( + callback.id, + exc=ChordError('GroupResult {0} no longer exists'.format( + gid, + )) + ) val = self.incr(key) if val >= len(deps): + callback = maybe_signature(task.request.chord, app=self.app) j = deps.join_native if deps.supports_native_join else deps.join - callback = subtask(task.request.chord) try: ret = j(propagate=propagate) - except Exception, exc: + except Exception as exc: try: - culprit = deps._failed_join_report().next() - reason = 'Dependency %s raised %r' % (culprit.id, exc) + culprit = next(deps._failed_join_report()) + reason = 'Dependency {0.id} raised {1!r}'.format( + culprit, exc, + ) except StopIteration: reason = repr(exc) + app._tasks[callback.task].backend.fail_from_current_stack( callback.id, exc=ChordError(reason), ) else: try: callback.delay(ret) - except Exception, exc: + except Exception as exc: app._tasks[callback.task].backend.fail_from_current_stack( callback.id, - exc=ChordError('Callback error: %r' % (exc, )), + exc=ChordError('Callback error: {0!r}'.format(exc)), ) finally: deps.delete() diff --git a/awx/lib/site-packages/celery/backends/cache.py b/awx/lib/site-packages/celery/backends/cache.py index 411a07a780..3f42d1acf1 100644 --- a/awx/lib/site-packages/celery/backends/cache.py +++ b/awx/lib/site-packages/celery/backends/cache.py @@ -10,11 +10,13 @@ from __future__ import absolute_import from kombu.utils import cached_property -from celery.datastructures import LRUCache from celery.exceptions import ImproperlyConfigured +from 
celery.utils.functional import LRUCache from .base import KeyValueStoreBackend +__all__ = ['CacheBackend'] + _imp = [None] REQUIRES_BACKEND = """\ @@ -22,8 +24,8 @@ The memcached backend requires either pylibmc or python-memcached.\ """ UNKNOWN_BACKEND = """\ -The cache backend %r is unknown, -Please use one of the following backends instead: %s\ +The cache backend {0!r} is unknown, +Please use one of the following backends instead: {1}\ """ @@ -85,13 +87,14 @@ class CacheBackend(KeyValueStoreBackend): supports_native_join = True implements_incr = True - def __init__(self, expires=None, backend=None, options={}, **kwargs): - super(CacheBackend, self).__init__(self, **kwargs) + def __init__(self, app, expires=None, backend=None, + options={}, url=None, **kwargs): + super(CacheBackend, self).__init__(app, **kwargs) self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS, **options) - self.backend = backend or self.app.conf.CELERY_CACHE_BACKEND + self.backend = url or backend or self.app.conf.CELERY_CACHE_BACKEND if self.backend: self.backend, _, servers = self.backend.partition('://') self.servers = servers.rstrip('/').split(';') @@ -99,7 +102,7 @@ class CacheBackend(KeyValueStoreBackend): try: self.Client = backends[self.backend]() except KeyError: - raise ImproperlyConfigured(UNKNOWN_BACKEND % ( + raise ImproperlyConfigured(UNKNOWN_BACKEND.format( self.backend, ', '.join(backends))) def get(self, key): @@ -127,7 +130,7 @@ class CacheBackend(KeyValueStoreBackend): def __reduce__(self, args=(), kwargs={}): servers = ';'.join(self.servers) - backend = '%s://%s/' % (self.backend, servers) + backend = '{0}://{1}/'.format(self.backend, servers) kwargs.update( dict(backend=backend, expires=self.expires, diff --git a/awx/lib/site-packages/celery/backends/cassandra.py b/awx/lib/site-packages/celery/backends/cassandra.py index 66ae7b2851..774e6b7927 100644 --- a/awx/lib/site-packages/celery/backends/cassandra.py +++ b/awx/lib/site-packages/celery/backends/cassandra.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# -* coding: utf-8 -*- """ celery.backends.cassandra ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -20,15 +20,18 @@ import time from celery import states from celery.exceptions import ImproperlyConfigured +from celery.five import monotonic from celery.utils.log import get_logger from celery.utils.timeutils import maybe_timedelta, timedelta_seconds -from .base import BaseDictBackend +from .base import BaseBackend + +__all__ = ['CassandraBackend'] logger = get_logger(__name__) -class CassandraBackend(BaseDictBackend): +class CassandraBackend(BaseBackend): """Highly fault tolerant Cassandra backend. .. attribute:: servers @@ -100,7 +103,7 @@ class CassandraBackend(BaseDictBackend): self._column_family = None def _retry_on_error(self, fun, *args, **kwargs): - ts = time.time() + self._retry_timeout + ts = monotonic() + self._retry_timeout while 1: try: return fun(*args, **kwargs) @@ -110,8 +113,8 @@ class CassandraBackend(BaseDictBackend): pycassa.AllServersUnavailable, socket.error, socket.timeout, - Thrift.TException), exc: - if time.time() > ts: + Thrift.TException) as exc: + if monotonic() > ts: raise logger.warning('Cassandra error: %r. 
Retrying...', exc) time.sleep(self._retry_wait) @@ -132,7 +135,8 @@ class CassandraBackend(BaseDictBackend): if self._column_family is not None: self._column_family = None - def _store_result(self, task_id, result, status, traceback=None): + def _store_result(self, task_id, result, status, + traceback=None, request=None, **kwargs): """Store return value and status of an executed task.""" def _do_store(): @@ -141,7 +145,9 @@ class CassandraBackend(BaseDictBackend): meta = {'status': status, 'date_done': date_done.strftime('%Y-%m-%dT%H:%M:%SZ'), 'traceback': self.encode(traceback), - 'children': self.encode(self.current_task_children())} + 'children': self.encode( + self.current_task_children(request), + )} if self.detailed_mode: meta['result'] = result cf.insert(task_id, {date_done: self.encode(meta)}, @@ -161,7 +167,7 @@ class CassandraBackend(BaseDictBackend): try: if self.detailed_mode: row = cf.get(task_id, column_reversed=True, column_count=1) - meta = self.decode(row.values()[0]) + meta = self.decode(list(row.values())[0]) meta['task_id'] = task_id else: obj = cf.get(task_id) diff --git a/awx/lib/site-packages/celery/backends/couchbase.py b/awx/lib/site-packages/celery/backends/couchbase.py new file mode 100644 index 0000000000..2d51b80010 --- /dev/null +++ b/awx/lib/site-packages/celery/backends/couchbase.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.couchbase + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + CouchBase result store backend. + +""" +from __future__ import absolute_import + +import logging + +try: + from couchbase import Couchbase + from couchbase.connection import Connection + from couchbase.exceptions import NotFoundError +except ImportError: + Couchbase = Connection = NotFoundError = None # noqa + +from kombu.utils.url import _parse_url + +from celery.exceptions import ImproperlyConfigured +from celery.utils.timeutils import maybe_timedelta + +from .base import KeyValueStoreBackend + +__all__ = ['CouchBaseBackend'] + + +class CouchBaseBackend(KeyValueStoreBackend): + bucket = 'default' + host = 'localhost' + port = 8091 + username = None + password = None + quiet = False + conncache = None + unlock_gil = True + timeout = 2.5 + transcoder = None + # supports_autoexpire = False + + def __init__(self, url=None, *args, **kwargs): + """Initialize CouchBase backend instance. + + :raises celery.exceptions.ImproperlyConfigured: if + module :mod:`couchbase` is not available. 
+ + """ + super(CouchBaseBackend, self).__init__(*args, **kwargs) + + self.expires = kwargs.get('expires') or maybe_timedelta( + self.app.conf.CELERY_TASK_RESULT_EXPIRES) + + if Couchbase is None: + raise ImproperlyConfigured( + 'You need to install the couchbase library to use the ' + 'CouchBase backend.', + ) + + uhost = uport = uname = upass = ubucket = None + if url: + _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url) + ubucket = ubucket.strip('/') if ubucket else None + + config = self.app.conf.get('CELERY_COUCHBASE_BACKEND_SETTINGS', None) + if config is not None: + if not isinstance(config, dict): + raise ImproperlyConfigured( + 'Couchbase backend settings should be grouped in a dict', + ) + else: + config = {} + + self.host = uhost or config.get('host', self.host) + self.port = int(uport or config.get('port', self.port)) + self.bucket = ubucket or config.get('bucket', self.bucket) + self.username = uname or config.get('username', self.username) + self.password = upass or config.get('password', self.password) + + self._connection = None + + def _get_connection(self): + """Connect to the Couchbase server.""" + if self._connection is None: + kwargs = {'bucket': self.bucket, 'host': self.host} + + if self.port: + kwargs.update({'port': self.port}) + if self.username: + kwargs.update({'username': self.username}) + if self.password: + kwargs.update({'password': self.password}) + + logging.debug('couchbase settings %r', kwargs) + self._connection = Connection(**kwargs) + return self._connection + + @property + def connection(self): + return self._get_connection() + + def get(self, key): + try: + return self.connection.get(key).value + except NotFoundError: + return None + + def set(self, key, value): + self.connection.set(key, value) + + def mget(self, keys): + return [self.get(key) for key in keys] + + def delete(self, key): + self.connection.delete(key) diff --git a/awx/lib/site-packages/celery/backends/database/__init__.py b/awx/lib/site-packages/celery/backends/database/__init__.py index 7bb98cfc2e..58109e7824 100644 --- a/awx/lib/site-packages/celery/backends/database/__init__.py +++ b/awx/lib/site-packages/celery/backends/database/__init__.py @@ -12,13 +12,16 @@ from functools import wraps from celery import states from celery.exceptions import ImproperlyConfigured +from celery.five import range from celery.utils.timeutils import maybe_timedelta -from celery.backends.base import BaseDictBackend +from celery.backends.base import BaseBackend from .models import Task, TaskSet from .session import ResultSession +__all__ = ['DatabaseBackend'] + def _sqlalchemy_installed(): try: @@ -39,28 +42,30 @@ def retry(fun): def _inner(*args, **kwargs): max_retries = kwargs.pop('max_retries', 3) - for retries in xrange(max_retries + 1): + for retries in range(max_retries): try: return fun(*args, **kwargs) except (DatabaseError, OperationalError): - if retries + 1 > max_retries: + if retries + 1 >= max_retries: raise return _inner -class DatabaseBackend(BaseDictBackend): +class DatabaseBackend(BaseBackend): """The database result backend.""" # ResultSet.iterate should sleep this much between each pool, # to not bombard the database with queries. 
subpolling_interval = 0.5

     def __init__(self, dburi=None, expires=None,
-                 engine_options=None, **kwargs):
+                 engine_options=None, url=None, **kwargs):
+        # The `url` argument was added later and is used by
+        # the app to set backend by url (celery.backends.get_backend_by_url)
         super(DatabaseBackend, self).__init__(**kwargs)
         conf = self.app.conf
         self.expires = maybe_timedelta(self.prepare_expires(expires))
-        self.dburi = dburi or conf.CELERY_RESULT_DBURI
+        self.dburi = url or dburi or conf.CELERY_RESULT_DBURI
         self.engine_options = dict(
             engine_options or {},
             **conf.CELERY_RESULT_ENGINE_OPTIONS or {})
@@ -68,6 +73,11 @@ class DatabaseBackend(BaseDictBackend):
             'short_lived_sessions',
             conf.CELERY_RESULT_DB_SHORT_LIVED_SESSIONS,
         )
+
+        tablenames = conf.CELERY_RESULT_DB_TABLENAMES or {}
+        Task.__table__.name = tablenames.get('task', 'celery_taskmeta')
+        TaskSet.__table__.name = tablenames.get('group', 'celery_tasksetmeta')
+
         if not self.dburi:
             raise ImproperlyConfigured(
                 'Missing connection string! Do you have '
@@ -82,7 +92,7 @@ class DatabaseBackend(BaseDictBackend):
     @retry
     def _store_result(self, task_id, result, status,
-                      traceback=None, max_retries=3):
+                      traceback=None, max_retries=3, **kwargs):
         """Store return value and status of an executed task."""
         session = self.ResultSession()
         try:
diff --git a/awx/lib/site-packages/celery/backends/database/a805d4bd.py b/awx/lib/site-packages/celery/backends/database/a805d4bd.py
deleted file mode 100644
index e0d14a1fea..0000000000
--- a/awx/lib/site-packages/celery/backends/database/a805d4bd.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    celery.backends.database.a805d4bd
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-    This module fixes a bug with pickling and relative imports in Python < 2.6.
-
-    The problem is with pickling an e.g. `exceptions.KeyError` instance.
-    As SQLAlchemy has its own `exceptions` module, pickle will try to
-    lookup :exc:`KeyError` in the wrong module, resulting in this exception::
-
-        cPickle.PicklingError: Can't pickle <type 'exceptions.KeyError'>:
-            attribute lookup exceptions.KeyError failed
-
-    doing `import exceptions` just before the dump in `sqlalchemy.types`
-    reveals the source of the bug::
-
-        EXCEPTIONS:
-
-    Hence the random module name 'a805d5bd' is taken to decrease the chances of
-    a collision.
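The tablenames block added to DatabaseBackend.__init__ above makes the two result tables renameable. A configuration sketch (the db+ URL prefix and the custom names are illustrative, not taken from this patch)::

    from celery import Celery

    app = Celery('proj')
    app.conf.CELERY_RESULT_BACKEND = 'db+sqlite:///results.sqlite'
    app.conf.CELERY_RESULT_DB_TABLENAMES = {
        'task': 'myapp_taskmeta',      # default: celery_taskmeta
        'group': 'myapp_tasksetmeta',  # default: celery_tasksetmeta
    }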
- -""" -from __future__ import absolute_import - -from sqlalchemy.types import PickleType as _PickleType - - -class PickleType(_PickleType): # pragma: no cover - - def bind_processor(self, dialect): - impl_processor = self.impl.bind_processor(dialect) - dumps = self.pickler.dumps - protocol = self.protocol - if impl_processor: - - def process(value): - if value is not None: - value = dumps(value, protocol) - return impl_processor(value) - - else: - - def process(value): # noqa - if value is not None: - value = dumps(value, protocol) - return value - return process - - def result_processor(self, dialect, coltype): - impl_processor = self.impl.result_processor(dialect, coltype) - loads = self.pickler.loads - if impl_processor: - - def process(value): - value = impl_processor(value) - if value is not None: - return loads(value) - else: - - def process(value): # noqa - if value is not None: - return loads(value) - return process - - def copy_value(self, value): - if self.mutable: - return self.pickler.loads(self.pickler.dumps(value, self.protocol)) - else: - return value diff --git a/awx/lib/site-packages/celery/backends/database/dfd042c7.py b/awx/lib/site-packages/celery/backends/database/dfd042c7.py deleted file mode 100644 index ea932a74c9..0000000000 --- a/awx/lib/site-packages/celery/backends/database/dfd042c7.py +++ /dev/null @@ -1,50 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.backends.database.dfd042c7 - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - SQLAlchemy 0.5.8 version of :mod:`~celery.backends.database.a805d4bd`, - see the docstring of that module for an explanation of why we need - this workaround. - -""" -from __future__ import absolute_import - -from sqlalchemy.types import PickleType as _PickleType -from sqlalchemy import util - - -class PickleType(_PickleType): # pragma: no cover - - def process_bind_param(self, value, dialect): - dumps = self.pickler.dumps - protocol = self.protocol - if value is not None: - return dumps(value, protocol) - - def process_result_value(self, value, dialect): - loads = self.pickler.loads - if value is not None: - return loads(str(value)) - - def copy_value(self, value): - if self.mutable: - return self.pickler.loads(self.pickler.dumps(value, self.protocol)) - else: - return value - - def compare_values(self, x, y): - if self.comparator: - return self.comparator(x, y) - elif self.mutable and not hasattr(x, '__eq__') and x is not None: - util.warn_deprecated( - 'Objects stored with PickleType when mutable=True ' - 'must implement __eq__() for reliable comparison.') - a = self.pickler.dumps(x, self.protocol) - b = self.pickler.dumps(y, self.protocol) - return a == b - else: - return x == y - - def is_mutable(self): - return self.mutable diff --git a/awx/lib/site-packages/celery/backends/database/models.py b/awx/lib/site-packages/celery/backends/database/models.py index f34d7252c7..45acb97ef2 100644 --- a/awx/lib/site-packages/celery/backends/database/models.py +++ b/awx/lib/site-packages/celery/backends/database/models.py @@ -11,16 +11,13 @@ from __future__ import absolute_import from datetime import datetime import sqlalchemy as sa +from sqlalchemy.types import PickleType from celery import states from .session import ResultModelBase -# See docstring of a805d4bd for an explanation for this workaround ;) -if sa.__version__.startswith('0.5'): - from .dfd042c7 import PickleType -else: - from .a805d4bd import PickleType # noqa +__all__ = ['Task', 'TaskSet'] class Task(ResultModelBase): @@ -49,7 +46,7 @@ class Task(ResultModelBase): 'date_done': 
self.date_done}

     def __repr__(self):
-        return '<Task: %s state: %s>' % (self.task_id, self.status)
+        return '<Task {0.task_id} state: {0.status}>'.format(self)


 class TaskSet(ResultModelBase):
@@ -74,4 +71,4 @@
             'date_done': self.date_done}

     def __repr__(self):
-        return '<TaskSet: %s>' % (self.taskset_id, )
+        return '<TaskSet: {0.taskset_id}>'.format(self)
diff --git a/awx/lib/site-packages/celery/backends/database/session.py b/awx/lib/site-packages/celery/backends/database/session.py
index ce1808ee63..fef3843e4f 100644
--- a/awx/lib/site-packages/celery/backends/database/session.py
+++ b/awx/lib/site-packages/celery/backends/database/session.py
@@ -9,6 +9,7 @@
 from __future__ import absolute_import

 from collections import defaultdict
+from multiprocessing.util import register_after_fork

 from sqlalchemy import create_engine
 from sqlalchemy.orm import sessionmaker
@@ -20,11 +21,29 @@
 _SETUP = defaultdict(lambda: False)
 _ENGINES = {}
 _SESSIONS = {}

+__all__ = ['ResultSession', 'get_engine', 'create_session']
+
+
+class _after_fork(object):
+    registered = False
+
+    def __call__(self):
+        self.registered = False  # child must reregister
+        for engine in list(_ENGINES.values()):
+            engine.dispose()
+        _ENGINES.clear()
+        _SESSIONS.clear()
+after_fork = _after_fork()
+

 def get_engine(dburi, **kwargs):
-    if dburi not in _ENGINES:
-        _ENGINES[dburi] = create_engine(dburi, **kwargs)
-    return _ENGINES[dburi]
+    try:
+        return _ENGINES[dburi]
+    except KeyError:
+        engine = _ENGINES[dburi] = create_engine(dburi, **kwargs)
+        after_fork.registered = True
+        register_after_fork(after_fork, after_fork)
+        return engine


 def create_session(dburi, short_lived_sessions=False, **kwargs):
diff --git a/awx/lib/site-packages/celery/backends/mongodb.py b/awx/lib/site-packages/celery/backends/mongodb.py
index 30bbf673f3..c3229d51c6 100644
--- a/awx/lib/site-packages/celery/backends/mongodb.py
+++ b/awx/lib/site-packages/celery/backends/mongodb.py
@@ -23,13 +23,17 @@ if pymongo:
 else:  # pragma: no cover
     Binary = None  # noqa

+from kombu.syn import detect_environment
 from kombu.utils import cached_property

 from celery import states
 from celery.exceptions import ImproperlyConfigured
+from celery.five import string_t
 from celery.utils.timeutils import maybe_timedelta

-from .base import BaseDictBackend
+from .base import BaseBackend
+
+__all__ = ['MongoBackend']


 class Bunch(object):
@@ -38,18 +42,20 @@ class Bunch(object):
         self.__dict__.update(kw)


-class MongoBackend(BaseDictBackend):
-    mongodb_host = 'localhost'
-    mongodb_port = 27017
-    mongodb_user = None
-    mongodb_password = None
-    mongodb_database = 'celery'
-    mongodb_taskmeta_collection = 'celery_taskmeta'
-    mongodb_max_pool_size = 10
-    mongodb_options = None
+class MongoBackend(BaseBackend):
+    host = 'localhost'
+    port = 27017
+    user = None
+    password = None
+    database_name = 'celery'
+    taskmeta_collection = 'celery_taskmeta'
+    max_pool_size = 10
+    options = None

     supports_autoexpire = False

+    _connection = None
+
     def __init__(self, *args, **kwargs):
         """Initialize MongoDB backend instance.

         :raises celery.exceptions.ImproperlyConfigured: if
             module :mod:`pymongo` is not available.
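The session.py hunk above does two related things: it caches one engine per dburi, and it registers an after-fork hook so a forked pool worker disposes engines inherited from the parent instead of sharing their pooled connections. A standalone sketch of the same pattern (simplified: a registration guard is added here, where the diff re-registers on every cache miss)::

    from multiprocessing.util import register_after_fork

    from sqlalchemy import create_engine

    _ENGINES = {}

    class _after_fork(object):
        registered = False

        def __call__(self):
            self.registered = False  # child must reregister
            for engine in list(_ENGINES.values()):
                engine.dispose()     # drop connections copied by fork()
            _ENGINES.clear()
    after_fork = _after_fork()

    def get_engine(dburi, **kwargs):
        try:
            return _ENGINES[dburi]
        except KeyError:
            engine = _ENGINES[dburi] = create_engine(dburi, **kwargs)
            if not after_fork.registered:
                after_fork.registered = True
                # register_after_fork(obj, fun) calls fun(obj) in children
                register_after_fork(after_fork, after_fork)
            return engine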
""" + self.options = {} super(MongoBackend, self).__init__(*args, **kwargs) self.expires = kwargs.get('expires') or maybe_timedelta( self.app.conf.CELERY_TASK_RESULT_EXPIRES) @@ -66,47 +73,54 @@ class MongoBackend(BaseDictBackend): 'You need to install the pymongo library to use the ' 'MongoDB backend.') - config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS', None) + config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS') if config is not None: if not isinstance(config, dict): raise ImproperlyConfigured( 'MongoDB backend settings should be grouped in a dict') + config = dict(config) # do not modify original - self.mongodb_host = config.get('host', self.mongodb_host) - self.mongodb_port = int(config.get('port', self.mongodb_port)) - self.mongodb_user = config.get('user', self.mongodb_user) - self.mongodb_options = config.get('options', {}) - self.mongodb_password = config.get( - 'password', self.mongodb_password) - self.mongodb_database = config.get( - 'database', self.mongodb_database) - self.mongodb_taskmeta_collection = config.get( - 'taskmeta_collection', self.mongodb_taskmeta_collection) - self.mongodb_max_pool_size = config.get( - 'max_pool_size', self.mongodb_max_pool_size) + self.host = config.pop('host', self.host) + self.port = int(config.pop('port', self.port)) + self.user = config.pop('user', self.user) + self.password = config.pop('password', self.password) + self.database_name = config.pop('database', self.database_name) + self.taskmeta_collection = config.pop( + 'taskmeta_collection', self.taskmeta_collection, + ) - self._connection = None + self.options = dict(config, **config.pop('options', None) or {}) + + # Set option defaults + self.options.setdefault('ssl', self.app.conf.BROKER_USE_SSL) + self.options.setdefault('max_pool_size', self.max_pool_size) + self.options.setdefault('auto_start_request', False) + + url = kwargs.get('url') + if url: + # Specifying backend as an URL + self.host = url def _get_connection(self): """Connect to the MongoDB server.""" if self._connection is None: - from pymongo.connection import Connection + from pymongo import MongoClient # The first pymongo.Connection() argument (host) can be # a list of ['host:port'] elements or a mongodb connection - # URI. If this is the case, don't use self.mongodb_port + # URI. If this is the case, don't use self.port # but let pymongo get the port(s) from the URI instead. # This enables the use of replica sets and sharding. # See pymongo.Connection() for more info. 
- args = [self.mongodb_host] - kwargs = {'max_pool_size': self.mongodb_max_pool_size} - if isinstance(self.mongodb_host, basestring) \ - and not self.mongodb_host.startswith('mongodb://'): - args.append(self.mongodb_port) - - self._connection = Connection( - *args, **dict(kwargs, **self.mongodb_options or {}) - ) + url = self.host + if isinstance(url, string_t) \ + and not url.startswith('mongodb://'): + url = 'mongodb://{0}:{1}'.format(url, self.port) + if url == 'mongodb://': + url = url + 'localhost' + if detect_environment() != 'default': + self.options['use_greenlets'] = True + self._connection = MongoClient(host=url, **self.options) return self._connection @@ -118,14 +132,17 @@ class MongoBackend(BaseDictBackend): del(self.database) self._connection = None - def _store_result(self, task_id, result, status, traceback=None): + def _store_result(self, task_id, result, status, + traceback=None, request=None, **kwargs): """Store return value and status of an executed task.""" meta = {'_id': task_id, 'status': status, 'result': Binary(self.encode(result)), 'date_done': datetime.utcnow(), 'traceback': Binary(self.encode(traceback)), - 'children': Binary(self.encode(self.current_task_children()))} + 'children': Binary(self.encode( + self.current_task_children(request), + ))} self.collection.save(meta) return result @@ -200,10 +217,10 @@ class MongoBackend(BaseDictBackend): def _get_database(self): conn = self._get_connection() - db = conn[self.mongodb_database] - if self.mongodb_user and self.mongodb_password: - if not db.authenticate(self.mongodb_user, - self.mongodb_password): + db = conn[self.database_name] + if self.user and self.password: + if not db.authenticate(self.user, + self.password): raise ImproperlyConfigured( 'Invalid MongoDB username or password.') return db @@ -217,7 +234,7 @@ class MongoBackend(BaseDictBackend): @cached_property def collection(self): """Get the metadata task collection.""" - collection = self.database[self.mongodb_taskmeta_collection] + collection = self.database[self.taskmeta_collection] # Ensure an index on date_done is there, if not process the index # in the background. Once completed cleanup will be much faster diff --git a/awx/lib/site-packages/celery/backends/redis.py b/awx/lib/site-packages/celery/backends/redis.py index 38583de31a..4b09802e31 100644 --- a/awx/lib/site-packages/celery/backends/redis.py +++ b/awx/lib/site-packages/celery/backends/redis.py @@ -22,6 +22,12 @@ except ImportError: # pragma: no cover redis = None # noqa ConnectionError = None # noqa +__all__ = ['RedisBackend'] + +REDIS_MISSING = """\ +You need to install the redis library in order to use \ +the Redis result store backend.""" + class RedisBackend(KeyValueStoreBackend): """Redis task result store.""" @@ -53,15 +59,13 @@ class RedisBackend(KeyValueStoreBackend): super(RedisBackend, self).__init__(**kwargs) conf = self.app.conf if self.redis is None: - raise ImproperlyConfigured( - 'You need to install the redis library in order to use ' - 'the Redis result store backend.') + raise ImproperlyConfigured(REDIS_MISSING) # For compatibility with the old REDIS_* configuration keys. 
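The `_get` closure just below implements that compatibility: each key is tried first with the `CELERY_REDIS_{0}` prefix and then with the bare `REDIS_{0}` prefix. A standalone sketch of the lookup order (conf contents hypothetical)::

    conf = {'REDIS_HOST': 'redis.example.com'}  # legacy-style key

    def _get(key):
        for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}':
            try:
                return conf[prefix.format(key)]
            except KeyError:
                pass

    assert _get('HOST') == 'redis.example.com'
    assert _get('PORT') is None  # neither key is present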
def _get(key): - for prefix in 'CELERY_REDIS_%s', 'REDIS_%s': + for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}': try: - return conf[prefix % key] + return conf[prefix.format(key)] except KeyError: pass if host and '://' in host: diff --git a/awx/lib/site-packages/celery/backends/rpc.py b/awx/lib/site-packages/celery/backends/rpc.py new file mode 100644 index 0000000000..28d54263f9 --- /dev/null +++ b/awx/lib/site-packages/celery/backends/rpc.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.rpc + ~~~~~~~~~~~~~~~~~~~ + + RPC-style result backend, using reply-to and one queue per client. + +""" +from __future__ import absolute_import + +from kombu import Consumer, Exchange +from kombu.common import maybe_declare +from kombu.utils import cached_property + +from celery import current_task +from celery.backends import amqp + +__all__ = ['RPCBackend'] + + +class RPCBackend(amqp.AMQPBackend): + persistent = False + + class Consumer(Consumer): + auto_declare = False + + def _create_exchange(self, name, type='direct', delivery_mode=2): + # uses direct to queue routing (anon exchange). + return Exchange(None) + + def on_task_call(self, producer, task_id): + maybe_declare(self.binding(producer.channel), retry=True) + + def _create_binding(self, task_id): + return self.binding + + def _many_bindings(self, ids): + return [self.binding] + + def rkey(self, task_id): + return task_id + + def destination_for(self, task_id, request): + # Request is a new argument for backends, so must still support + # old code that rely on current_task + try: + request = request or current_task.request + except AttributeError: + raise RuntimeError( + 'RPC backend missing task request for {0!r}'.format(task_id), + ) + return request.reply_to, request.correlation_id or task_id + + def on_reply_declare(self, task_id): + pass + + @property + def binding(self): + return self.Queue(self.oid, self.exchange, self.oid, + durable=False, auto_delete=False) + + @cached_property + def oid(self): + return self.app.oid diff --git a/awx/lib/site-packages/celery/beat.py b/awx/lib/site-packages/celery/beat.py index 207985c9f5..14101aab2a 100644 --- a/awx/lib/site-packages/celery/beat.py +++ b/awx/lib/site-packages/celery/beat.py @@ -7,7 +7,6 @@ """ from __future__ import absolute_import -from __future__ import with_statement import errno import os @@ -16,20 +15,23 @@ import shelve import sys import traceback +from threading import Event, Thread + from billiard import Process, ensure_multiprocessing from kombu.utils import cached_property, reprcall -from kombu.utils.functional import maybe_promise +from kombu.utils.functional import maybe_evaluate from . import __version__ from . import platforms from . import signals -from . 
import current_app -from .app import app_or_default +from .five import items, reraise, values, monotonic from .schedules import maybe_schedule, crontab from .utils.imports import instantiate -from .utils.threads import Event, Thread from .utils.timeutils import humanize_seconds -from .utils.log import get_logger +from .utils.log import get_logger, iter_open_logger_fds + +__all__ = ['SchedulingError', 'ScheduleEntry', 'Scheduler', + 'PersistentScheduler', 'Service', 'EmbeddedService'] logger = get_logger(__name__) debug, info, error, warning = (logger.debug, logger.info, @@ -79,21 +81,22 @@ class ScheduleEntry(object): def __init__(self, name=None, task=None, last_run_at=None, total_run_count=None, schedule=None, args=(), kwargs={}, - options={}, relative=False): + options={}, relative=False, app=None): + self.app = app self.name = name self.task = task self.args = args self.kwargs = kwargs self.options = options - self.schedule = maybe_schedule(schedule, relative) + self.schedule = maybe_schedule(schedule, relative, app=self.app) self.last_run_at = last_run_at or self._default_now() self.total_run_count = total_run_count or 0 def _default_now(self): - return self.schedule.now() if self.schedule else current_app.now() + return self.schedule.now() if self.schedule else self.app.now() def _next_instance(self, last_run_at=None): - """Returns a new instance of the same class, but with + """Return a new instance of the same class, but with its date and count fields updated.""" return self.__class__(**dict( self, @@ -102,6 +105,12 @@ class ScheduleEntry(object): )) __next__ = next = _next_instance # for 2to3 + def __reduce__(self): + return self.__class__, ( + self.name, self.task, self.last_run_at, self.total_run_count, + self.schedule, self.args, self.kwargs, self.options, + ) + def update(self, other): """Update values from another entry. 
@@ -118,24 +127,28 @@ class ScheduleEntry(object):
         return self.schedule.is_due(self.last_run_at)

     def __iter__(self):
-        return vars(self).iteritems()
+        return iter(items(vars(self)))

     def __repr__(self):
-        return ('<Entry: %s %s {%s}>' % (self.name,
-                reprcall(self.task, self.args or (), self.kwargs or {}),
-                self.schedule))
+        return '<Entry: {0.name} {call} {0.schedule}'.format(
+            self, call=reprcall(self.task, self.args or (), self.kwargs or {}),
+        )

     def should_sync(self):
         return (not self._last_sync or
-                (time.time() - self._last_sync) > self.sync_every)
+                (monotonic() - self._last_sync) > self.sync_every)

     def reserve(self, entry):
-        new_entry = self.schedule[entry.name] = entry.next()
+        new_entry = self.schedule[entry.name] = next(entry)
         return new_entry

     def apply_async(self, entry, publisher=None, **kwargs):
@@ -228,10 +241,10 @@
             result = self.send_task(entry.task, entry.args, entry.kwargs,
                                     publisher=publisher, **entry.options)
-        except Exception, exc:
-            raise SchedulingError, SchedulingError(
-                "Couldn't apply scheduled task %s: %s" % (
-                    entry.name, exc)), sys.exc_info()[2]
+        except Exception as exc:
+            reraise(SchedulingError, SchedulingError(
+                "Couldn't apply scheduled task {0.name}: {exc}".format(
+                    entry, exc)), sys.exc_info()[2])
         finally:
             if self.should_sync():
                 self._do_sync()
@@ -245,10 +258,10 @@
     def _do_sync(self):
         try:
-            debug('Celerybeat: Synchronizing schedule...')
+            debug('beat: Synchronizing schedule...')
             self.sync()
         finally:
-            self._last_sync = time.time()
+            self._last_sync = monotonic()

     def sync(self):
         pass
@@ -257,19 +270,20 @@
         self.sync()

     def add(self, **kwargs):
-        entry = self.Entry(**kwargs)
+        entry = self.Entry(app=self.app, **kwargs)
         self.schedule[entry.name] = entry
         return entry

     def _maybe_entry(self, name, entry):
         if isinstance(entry, self.Entry):
+            entry.app = self.app
             return entry
-        return self.Entry(**dict(entry, name=name))
+        return self.Entry(**dict(entry, name=name, app=self.app))

     def update_from_dict(self, dict_):
         self.schedule.update(dict(
             (name, self._maybe_entry(name, entry))
-            for name, entry in dict_.items()))
+            for name, entry in items(dict_)))

     def merge_inplace(self, b):
         schedule = self.schedule
@@ -281,7 +295,7 @@
         # Update and add new items in the schedule
         for key in B:
-            entry = self.Entry(**dict(b[key], name=key))
+            entry = self.Entry(**dict(b[key], name=key, app=self.app))
             if schedule.get(key):
                 schedule[key].update(entry)
             else:
@@ -291,7 +305,7 @@
         # callback called for each retry while the connection
         # can't be established.
         def _error_handler(exc, interval):
-            error('Celerybeat: Connection error: %s. '
+            error('beat: Connection error: %s. '
                   'Trying again in %s seconds...', exc, interval)

         return self.connection.ensure_connection(
@@ -338,7 +352,7 @@ class PersistentScheduler(Scheduler):
             self._store = self.persistence.open(self.schedule_filename,
                                                 writeback=True)
             entries = self._store.setdefault('entries', {})
-        except Exception, exc:
+        except Exception as exc:
             error('Removing corrupted schedule file %r: %r',
                   self.schedule_filename, exc, exc_info=True)
             self._remove_db()
@@ -373,7 +387,7 @@
         self._store.update(__version__=__version__, tz=tz, utc_enabled=utc)
         self.sync()
         debug('Current schedule:\n' + '\n'.join(
-            repr(entry) for entry in entries.itervalues()))
+            repr(entry) for entry in values(entries)))

     def get_schedule(self):
         return self._store['entries']
@@ -392,15 +406,15 @@
     @property
     def info(self):
-        return ' . db -> %s' % (self.schedule_filename, )
+        return ' . 
db -> {self.schedule_filename}'.format(self=self) class Service(object): scheduler_cls = PersistentScheduler - def __init__(self, max_interval=None, schedule_filename=None, - scheduler_cls=None, app=None): - app = self.app = app_or_default(app) + def __init__(self, app, max_interval=None, schedule_filename=None, + scheduler_cls=None): + self.app = app self.max_interval = (max_interval or app.conf.CELERYBEAT_MAX_LOOP_INTERVAL) self.scheduler_cls = scheduler_cls or self.scheduler_cls @@ -415,19 +429,19 @@ class Service(object): self.scheduler_cls, self.app) def start(self, embedded_process=False): - info('Celerybeat: Starting...') - debug('Celerybeat: Ticking with max interval->%s', + info('beat: Starting...') + debug('beat: Ticking with max interval->%s', humanize_seconds(self.scheduler.max_interval)) signals.beat_init.send(sender=self) if embedded_process: signals.beat_embedded_init.send(sender=self) - platforms.set_process_title('celerybeat') + platforms.set_process_title('celery beat') try: while not self._is_shutdown.is_set(): interval = self.scheduler.tick() - debug('Celerybeat: Waking up %s.', + debug('beat: Waking up %s.', humanize_seconds(interval, prefix='in ')) time.sleep(interval) except (KeyboardInterrupt, SystemExit): @@ -440,7 +454,7 @@ class Service(object): self._is_stopped.set() def stop(self, wait=False): - info('Celerybeat: Shutting down...') + info('beat: Shutting down...') self._is_shutdown.set() wait and self._is_stopped.wait() # block until shutdown done. @@ -488,6 +502,9 @@ else: def run(self): platforms.signals.reset('SIGTERM') + platforms.close_open_fds([ + sys.__stdin__, sys.__stdout__, sys.__stderr__, + ] + list(iter_open_logger_fds())) self.service.start(embedded_process=True) def stop(self): @@ -499,7 +516,7 @@ def EmbeddedService(*args, **kwargs): """Return embedded clock service. :keyword thread: Run threaded instead of as a separate process. - Default is :const:`False`. + Uses :mod:`multiprocessing` by default, if available. """ if kwargs.pop('thread', False) or _Process is None: diff --git a/awx/lib/site-packages/celery/bin/__init__.py b/awx/lib/site-packages/celery/bin/__init__.py index e69de29bb2..3f44b50240 100644 --- a/awx/lib/site-packages/celery/bin/__init__.py +++ b/awx/lib/site-packages/celery/bin/__init__.py @@ -0,0 +1,5 @@ +from __future__ import absolute_import + +from .base import Option + +__all__ = ['Option'] diff --git a/awx/lib/site-packages/celery/bin/camqadm.py b/awx/lib/site-packages/celery/bin/amqp.py similarity index 80% rename from awx/lib/site-packages/celery/bin/camqadm.py rename to awx/lib/site-packages/celery/bin/amqp.py index 366df42960..8dcb34c136 100644 --- a/awx/lib/site-packages/celery/bin/camqadm.py +++ b/awx/lib/site-packages/celery/bin/amqp.py @@ -5,26 +5,27 @@ The :program:`celery amqp` command. .. program:: celery amqp """ -from __future__ import absolute_import +from __future__ import absolute_import, print_function import cmd import sys import shlex import pprint +from collections import Callable +from functools import partial from itertools import count -try: - import amqp -except ImportError: - from amqplib import client_0_8 as amqp # noqa +from amqp import Message -from celery.app import app_or_default from celery.utils.functional import padlist from celery.bin.base import Command +from celery.five import string_t from celery.utils import strtobool +__all__ = ['AMQPAdmin', 'AMQShell', 'Spec', 'amqp'] + # Map to coerce strings to other types. 
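The COERCE map defined just below lets the shell convert string arguments to the type declared in a command's Spec, with strtobool handling 'yes'/'no' style booleans. A sketch of how Spec-style coercion uses it (the standalone helper is hypothetical; Spec.coerce does the equivalent per argument index)::

    from celery.utils import strtobool

    COERCE = {bool: strtobool}

    def coerce(arg_type, value):
        # Prefer a custom converter; otherwise call the type directly.
        return COERCE.get(arg_type, arg_type)(value)

    assert coerce(bool, 'yes') is True
    assert coerce(int, '42') == 42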
COERCE = {bool: strtobool} @@ -38,9 +39,7 @@ Example: -> queue.delete myqueue yes no """ - -def say(m, fh=sys.stderr): - fh.write('%s\n' % (m, )) +say = partial(print, file=sys.stderr) class Spec(object): @@ -98,16 +97,14 @@ class Spec(object): def format_response(self, response): """Format the return value of this command in a human-friendly way.""" if not self.returns: - if response is None: - return 'ok.' - return response - if callable(self.returns): + return 'ok.' if response is None else response + if isinstance(self.returns, Callable): return self.returns(response) - return self.returns % (response, ) + return self.returns.format(response) def format_arg(self, name, type, default_value=None): if default_value is not None: - return '%s:%s' % (name, default_value) + return '{0}:{1}'.format(name, default_value) return name def format_signature(self): @@ -124,7 +121,7 @@ def dump_message(message): def format_declare_queue(ret): - return 'ok. queue:%s messages:%s consumers:%s.' % ret + return 'ok. queue:{0} messages:{1} consumers:{2}.'.format(*ret) class AMQShell(cmd.Cmd): @@ -148,11 +145,11 @@ class AMQShell(cmd.Cmd): """ conn = None chan = None - prompt_fmt = '%d> ' + prompt_fmt = '{self.counter}> ' identchars = cmd.IDENTCHARS = '.' needs_reconnect = False counter = 1 - inc_counter = count(2).next + inc_counter = count(2) builtins = {'EOF': 'do_exit', 'exit': 'do_exit', @@ -179,13 +176,13 @@ class AMQShell(cmd.Cmd): 'queue.delete': Spec(('queue', str), ('if_unused', bool, 'no'), ('if_empty', bool, 'no'), - returns='ok. %d messages deleted.'), + returns='ok. {0} messages deleted.'), 'queue.purge': Spec(('queue', str), - returns='ok. %d messages deleted.'), + returns='ok. {0} messages deleted.'), 'basic.get': Spec(('queue', str), ('no_ack', bool, 'off'), returns=dump_message), - 'basic.publish': Spec(('msg', amqp.Message), + 'basic.publish': Spec(('msg', Message), ('exchange', str), ('routing_key', str), ('mandatory', bool, 'no'), @@ -203,10 +200,10 @@ class AMQShell(cmd.Cmd): def note(self, m): """Say something to the user. Disabled if :attr:`silent`.""" if not self.silent: - say(m, fh=self.out) + say(m, file=self.out) def say(self, m): - say(m, fh=self.out) + say(m, file=self.out) def get_amqp_api_command(self, cmd, arglist): """With a command name and a list of arguments, convert the arguments @@ -215,13 +212,6 @@ class AMQShell(cmd.Cmd): :returns: tuple of `(method, processed_args)`. - Example: - - >>> get_amqp_api_command('queue.delete', ['pobox', 'yes', 'no']) - (>, - ('testfoo', True, False)) - """ spec = self.amqp[cmd] args = spec.str_args_to_python(arglist) @@ -237,7 +227,7 @@ class AMQShell(cmd.Cmd): def display_command_help(self, cmd, short=False): spec = self.amqp[cmd] - self.say('%s %s' % (cmd, spec.format_signature())) + self.say('{0} {1}'.format(cmd, spec.format_signature())) def do_help(self, *args): if not args: @@ -249,7 +239,7 @@ class AMQShell(cmd.Cmd): self.display_command_help(args[0]) def default(self, line): - self.say("unknown syntax: '%s'. how about some 'help'?" % line) + self.say("unknown syntax: {0!r}. 
how about some 'help'?".format(line)) def get_names(self): return set(self.builtins) | set(self.amqp) @@ -282,11 +272,6 @@ class AMQShell(cmd.Cmd): :returns: tuple of three items: `(command_name, arglist, original_line)` - E.g:: - - >>> parseline('queue.delete A 'B' C') - ('queue.delete', 'A 'B' C', 'queue.delete A 'B' C') - """ parts = line.split() if parts: @@ -298,25 +283,20 @@ class AMQShell(cmd.Cmd): cmd, arg, line = self.parseline(line) if not line: return self.emptyline() - if cmd is None: - return self.default(line) self.lastcmd = line - if cmd == '': - return self.default(line) - else: - self.counter = self.inc_counter() - try: - self.respond(self.dispatch(cmd, arg)) - except (AttributeError, KeyError), exc: - self.default(line) - except Exception, exc: - self.say(exc) - self.needs_reconnect = True + self.counter = next(self.inc_counter) + try: + self.respond(self.dispatch(cmd, arg)) + except (AttributeError, KeyError) as exc: + self.default(line) + except Exception as exc: + self.say(exc) + self.needs_reconnect = True def respond(self, retval): """What to do with the return value of a command.""" if retval is not None: - if isinstance(retval, basestring): + if isinstance(retval, string_t): self.say(retval) else: self.say(pprint.pformat(retval)) @@ -329,15 +309,15 @@ class AMQShell(cmd.Cmd): @property def prompt(self): - return self.prompt_fmt % self.counter + return self.prompt_fmt.format(self=self) class AMQPAdmin(object): - """The celery :program:`camqadm` utility.""" + """The celery :program:`celery amqp` utility.""" Shell = AMQShell def __init__(self, *args, **kwargs): - self.app = app_or_default(kwargs.get('app')) + self.app = kwargs['app'] self.out = kwargs.setdefault('out', sys.stderr) self.silent = kwargs.get('silent') self.args = args @@ -346,7 +326,7 @@ class AMQPAdmin(object): if conn: conn.close() conn = self.app.connection() - self.note('-> connecting to %s.' % conn.as_uri()) + self.note('-> connecting to {0}.'.format(conn.as_uri())) conn.connect() self.note('-> connected.') return conn @@ -363,22 +343,35 @@ class AMQPAdmin(object): def note(self, m): if not self.silent: - say(m, fh=self.out) + say(m, file=self.out) -class AMQPAdminCommand(Command): +class amqp(Command): + """AMQP Administration Shell. + + Also works for non-amqp transports (but not ones that + store declarations in memory). + + Examples:: + + celery amqp + start shell mode + celery amqp help + show list of commands + + celery amqp exchange.delete name + celery amqp queue.delete queue + celery amqp queue.delete queue yes yes + + """ def run(self, *args, **options): options['app'] = self.app return AMQPAdmin(*args, **options).run() -def camqadm(*args, **options): - AMQPAdmin(*args, **options).run() - - def main(): - AMQPAdminCommand().execute_from_commandline() + amqp().execute_from_commandline() if __name__ == '__main__': # pragma: no cover main() diff --git a/awx/lib/site-packages/celery/bin/base.py b/awx/lib/site-packages/celery/bin/base.py index 9517f6821c..e08677198a 100644 --- a/awx/lib/site-packages/celery/bin/base.py +++ b/awx/lib/site-packages/celery/bin/base.py @@ -63,20 +63,28 @@ in any command that also has a `--detach` option. Optional directory to change to after detaching. 
""" -from __future__ import absolute_import +from __future__ import absolute_import, print_function import os import re +import socket import sys import warnings +import json from collections import defaultdict +from heapq import heappush +from inspect import getargspec from optparse import OptionParser, IndentedHelpFormatter, make_option as Option +from pprint import pformat from types import ModuleType -import celery +from celery import VERSION_BANNER, Celery, maybe_patch_concurrency +from celery import signals from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning -from celery.platforms import EX_FAILURE, EX_USAGE, maybe_patch_concurrency +from celery.five import items, string, string_t, values +from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE +from celery.utils import term from celery.utils import text from celery.utils.imports import symbol_by_name, import_from_cwd @@ -85,20 +93,70 @@ for warning in (CDeprecationWarning, CPendingDeprecationWarning): warnings.simplefilter('once', warning, 0) ARGV_DISABLED = """ -Unrecognized command line arguments: %s +Unrecognized command-line arguments: {0} Try --help? """ find_long_opt = re.compile(r'.+?(--.+?)(?:\s|,|$)') find_rst_ref = re.compile(r':\w+:`(.+?)`') +find_sformat = re.compile(r'%(\w)') + +__all__ = ['Error', 'UsageError', 'Extensions', 'HelpFormatter', + 'Command', 'Option', 'daemon_options'] + + +class Error(Exception): + status = EX_FAILURE + + def __init__(self, reason, status=None): + self.reason = reason + self.status = status if status is not None else self.status + super(Error, self).__init__(reason, status) + + def __str__(self): + return self.reason + __unicode__ = __str__ + + +class UsageError(Error): + status = EX_USAGE + + +class Extensions(object): + + def __init__(self, namespace, register): + self.names = [] + self.namespace = namespace + self.register = register + + def add(self, cls, name): + heappush(self.names, name) + self.register(cls, name=name) + + def load(self): + try: + from pkg_resources import iter_entry_points + except ImportError: # pragma: no cover + return + + for ep in iter_entry_points(self.namespace): + sym = ':'.join([ep.module_name, ep.attrs[0]]) + try: + cls = symbol_by_name(sym) + except (ImportError, SyntaxError) as exc: + warnings.warn( + 'Cannot load extension {0!r}: {1!r}'.format(sym, exc)) + else: + self.add(cls, ep.name) + return self.names class HelpFormatter(IndentedHelpFormatter): def format_epilog(self, epilog): if epilog: - return '\n%s\n\n' % epilog + return '\n{0}\n\n'.format(epilog) return '' def format_description(self, description): @@ -107,19 +165,21 @@ class HelpFormatter(IndentedHelpFormatter): class Command(object): - """Base class for command line applications. + """Base class for command-line applications. :keyword app: The current app. :keyword get_app: Callable returning the current app if no app provided. """ + Error = Error + UsageError = UsageError Parser = OptionParser #: Arg list used in help. args = '' #: Application version. - version = celery.VERSION_BANNER + version = VERSION_BANNER #: If false the parser will raise an exception if positional #: args are provided. @@ -142,6 +202,8 @@ class Command(object): Option('--loader', default=None), Option('--config', default=None), Option('--workdir', default=None, dest='working_directory'), + Option('--no-color', '-C', action='store_true', default=None), + Option('--quiet', '-q', action='store_true'), ) #: Enable if the application should support config from the cmdline. 
@@ -159,18 +221,70 @@ class Command(object): #: Set to true if this command doesn't have subcommands leaf = True - def __init__(self, app=None, get_app=None): + # used by :meth:`say_remote_command_reply`. + show_body = True + # used by :meth:`say_chat`. + show_reply = True + + prog_name = 'celery' + + def __init__(self, app=None, get_app=None, no_color=False, + stdout=None, stderr=None, quiet=False, on_error=None, + on_usage_error=None): self.app = app self.get_app = get_app or self._get_default_app + self.stdout = stdout or sys.stdout + self.stderr = stderr or sys.stderr + self.no_color = no_color + self.colored = term.colored(enabled=not self.no_color) + self.quiet = quiet + if not self.description: + self.description = self.__doc__ + if on_error: + self.on_error = on_error + if on_usage_error: + self.on_usage_error = on_usage_error def run(self, *args, **options): """This is the body of the command called by :meth:`handle_argv`.""" raise NotImplementedError('subclass responsibility') - def execute_from_commandline(self, argv=None): - """Execute application from command line. + def on_error(self, exc): + self.error(self.colored.red('Error: {0}'.format(exc))) - :keyword argv: The list of command line arguments. + def on_usage_error(self, exc): + self.handle_error(exc) + + def on_concurrency_setup(self): + pass + + def __call__(self, *args, **kwargs): + self.verify_args(args) + try: + ret = self.run(*args, **kwargs) + return ret if ret is not None else EX_OK + except self.UsageError as exc: + self.on_usage_error(exc) + return exc.status + except self.Error as exc: + self.on_error(exc) + return exc.status + + def verify_args(self, given, _index=0): + S = getargspec(self.run) + _index = 1 if S.args and S.args[0] == 'self' else _index + required = S.args[_index:-len(S.defaults) if S.defaults else None] + missing = required[len(given):] + if missing: + raise self.UsageError('Missing required {0}: {1}'.format( + text.pluralize(len(missing), 'argument'), + ', '.join(missing) + )) + + def execute_from_commandline(self, argv=None): + """Execute application from command-line. + + :keyword argv: The list of command-line arguments. Defaults to ``sys.argv``. """ @@ -183,11 +297,12 @@ class Command(object): # Dump version and exit if '--version' arg set. 
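The verify_args method above leans on getargspec so every subcommand reports a uniform 'Missing required argument' error instead of a bare TypeError from run(). A standalone sketch of the required-argument computation (helper name hypothetical)::

    from inspect import getargspec

    def required_args(fun):
        spec = getargspec(fun)
        args = spec.args[1:] if spec.args and spec.args[0] == 'self' else spec.args
        return args[:-len(spec.defaults)] if spec.defaults else args

    def run(self, name, dest=None):
        pass

    assert required_args(run) == ['name']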
self.early_version(argv) argv = self.setup_app_from_commandline(argv) - prog_name = os.path.basename(argv[0]) - return self.handle_argv(prog_name, argv[1:]) + self.prog_name = os.path.basename(argv[0]) + return self.handle_argv(self.prog_name, argv[1:]) - def run_from_argv(self, prog_name, argv=None): - return self.handle_argv(prog_name, sys.argv if argv is None else argv) + def run_from_argv(self, prog_name, argv=None, command=None): + return self.handle_argv(prog_name, + sys.argv if argv is None else argv, command) def maybe_patch_concurrency(self, argv=None): argv = argv or sys.argv @@ -196,24 +311,20 @@ class Command(object): maybe_patch_concurrency(argv, *pool_option) short_opts, long_opts = pool_option - def on_concurrency_setup(self): - pass - def usage(self, command): - """Returns the command line usage string for this app.""" - return '%%prog [options] %s' % (self.args, ) + return '%prog {0} [options] {self.args}'.format(command, self=self) def get_options(self): - """Get supported command line options.""" + """Get supported command-line options.""" return self.option_list def expanduser(self, value): - if isinstance(value, basestring): + if isinstance(value, string_t): return os.path.expanduser(value) return value - def handle_argv(self, prog_name, argv): - """Parses command line arguments from ``argv`` and dispatches + def handle_argv(self, prog_name, argv, command=None): + """Parse command-line arguments from ``argv`` and dispatch to :meth:`run`. :param prog_name: The program name (``argv[0]``). @@ -223,13 +334,14 @@ class Command(object): and ``argv`` contains positional arguments. """ - options, args = self.prepare_args(*self.parse_options(prog_name, argv)) - return self.run(*args, **options) + options, args = self.prepare_args( + *self.parse_options(prog_name, argv, command)) + return self(*args, **options) def prepare_args(self, options, args): if options: options = dict((k, self.expanduser(v)) - for k, v in vars(options).iteritems() + for k, v in items(vars(options)) if not k.startswith('_')) args = [self.expanduser(arg) for arg in args] self.check_args(args) @@ -237,25 +349,36 @@ class Command(object): def check_args(self, args): if not self.supports_args and args: - self.die(ARGV_DISABLED % (', '.join(args, )), EX_USAGE) + self.die(ARGV_DISABLED.format(', '.join(args)), EX_USAGE) + + def error(self, s): + self.out(s, fh=self.stderr) + + def out(self, s, fh=None): + print(s, file=fh or self.stdout) def die(self, msg, status=EX_FAILURE): - sys.stderr.write(msg + '\n') + self.error(msg) sys.exit(status) def early_version(self, argv): if '--version' in argv: - sys.stdout.write('%s\n' % self.version) + print(self.version, file=self.stdout) sys.exit(0) - def parse_options(self, prog_name, arguments): + def parse_options(self, prog_name, arguments, command=None): """Parse the available options.""" # Don't want to load configuration to just print the version, # so we handle --version manually here. 
- parser = self.create_parser(prog_name) - return parser.parse_args(arguments) + self.parser = self.create_parser(prog_name, command) + return self.parser.parse_args(arguments) def create_parser(self, prog_name, command=None): + option_list = ( + self.preload_options + + self.get_options() + + tuple(self.app.user_options['preload']) + ) return self.prepare_parser(self.Parser( prog=prog_name, usage=self.usage(command), @@ -263,19 +386,25 @@ class Command(object): epilog=self.epilog, formatter=HelpFormatter(), description=self.description, - option_list=(self.preload_options + self.get_options()))) + option_list=option_list, + )) def prepare_parser(self, parser): docs = [self.parse_doc(doc) for doc in (self.doc, __doc__) if doc] for doc in docs: - for long_opt, help in doc.iteritems(): + for long_opt, help in items(doc): option = parser.get_option(long_opt) if option is not None: - option.help = ' '.join(help) % {'default': option.default} + option.help = ' '.join(help).format(default=option.default) return parser def setup_app_from_commandline(self, argv): preload_options = self.parse_preload_options(argv) + quiet = preload_options.get('quiet') + if quiet is not None: + self.quiet = quiet + self.colored.enabled = \ + not preload_options.get('no_color', self.no_color) workdir = preload_options.get('working_directory') if workdir: os.chdir(workdir) @@ -303,7 +432,16 @@ class Command(object): if self.enable_config_from_cmdline: argv = self.process_cmdline_config(argv) else: - self.app = celery.Celery() + self.app = Celery() + + user_preload = tuple(self.app.user_options['preload'] or ()) + if user_preload: + user_options = self.preparse_options(argv, user_preload) + for user_option in user_preload: + user_options.setdefault(user_option.dest, user_option.default) + signals.user_preload_options.send( + sender=self, app=self.app, options=user_options, + ) return argv def find_app(self, app): @@ -313,9 +451,23 @@ class Command(object): # last part was not an attribute, but a module sym = import_from_cwd(app) if isinstance(sym, ModuleType): - if getattr(sym, '__path__', None): - return self.find_app('%s.celery:' % (app.replace(':', ''), )) - return sym.celery + try: + return sym.app + except AttributeError: + try: + return sym.celery + except AttributeError: + if getattr(sym, '__path__', None): + try: + return self.find_app( + '{0}.celery:'.format(app.replace(':', '')), + ) + except ImportError: + pass + for suspect in values(vars(sym)): + if isinstance(suspect, Celery): + return suspect + raise return sym def symbol_by_name(self, name): @@ -332,25 +484,36 @@ class Command(object): return argv def parse_preload_options(self, args): + return self.preparse_options(args, self.preload_options) + + def preparse_options(self, args, options): acc = {} opts = {} - for opt in self.preload_options: + for opt in options: for t in (opt._long_opts, opt._short_opts): - opts.update(dict(zip(t, [opt.dest] * len(t)))) + opts.update(dict(zip(t, [opt] * len(t)))) index = 0 length = len(args) while index < length: arg = args[index] - if arg.startswith('--') and '=' in arg: - key, value = arg.split('=', 1) - dest = opts.get(key) - if dest: - acc[dest] = value + if arg.startswith('--'): + if '=' in arg: + key, value = arg.split('=', 1) + opt = opts.get(key) + if opt: + acc[opt.dest] = value + else: + opt = opts.get(arg) + if opt and opt.action == 'store_true': + acc[opt.dest] = True elif arg.startswith('-'): - dest = opts.get(arg) - if dest: - acc[dest] = args[index + 1] - index += 1 + opt = opts.get(arg) + if opt: + if 
opt.takes_value(): + acc[opt.dest] = args[index + 1] + index += 1 + elif opt.action == 'store_true': + acc[opt.dest] = True index += 1 return acc @@ -368,7 +531,7 @@ class Command(object): return options def with_pool_option(self, argv): - """Returns tuple of ``(short_opts, long_opts)`` if the command + """Return tuple of ``(short_opts, long_opts)`` if the command supports a pool argument, and used to monkey patch eventlet/gevent environments as early as possible. @@ -377,10 +540,66 @@ class Command(object): """ pass + def simple_format(self, s, match=find_sformat, expand=r'\1', **keys): + if s: + host = socket.gethostname() + name, _, domain = host.partition('.') + keys = dict({'%': '%', 'h': host, 'n': name, 'd': domain}, **keys) + return match.sub(lambda m: keys[m.expand(expand)], s) + return s + def _get_default_app(self, *args, **kwargs): from celery._state import get_current_app return get_current_app() # omit proxy + def pretty_list(self, n): + c = self.colored + if not n: + return '- empty -' + return '\n'.join( + str(c.reset(c.white('*'), ' {0}'.format(item))) for item in n + ) + + def pretty_dict_ok_error(self, n): + c = self.colored + try: + return (c.green('OK'), + text.indent(self.pretty(n['ok'])[1], 4)) + except KeyError: + pass + return (c.red('ERROR'), + text.indent(self.pretty(n['error'])[1], 4)) + + def say_remote_command_reply(self, replies): + c = self.colored + node = next(iter(replies)) # <-- take first. + reply = replies[node] + status, preply = self.pretty(reply) + self.say_chat('->', c.cyan(node, ': ') + status, + text.indent(preply, 4) if self.show_reply else '') + + def pretty(self, n): + OK = str(self.colored.green('OK')) + if isinstance(n, list): + return OK, self.pretty_list(n) + if isinstance(n, dict): + if 'ok' in n or 'error' in n: + return self.pretty_dict_ok_error(n) + else: + return OK, json.dumps(n, sort_keys=True, indent=4) + if isinstance(n, string_t): + return OK, string(n) + return OK, pformat(n) + + def say_chat(self, direction, title, body=''): + c = self.colored + if direction == '<-' and self.quiet: + return + dirstr = not self.quiet and c.bold(c.white(direction), ' ') or '' + self.out(c.reset(dirstr, title)) + if body and self.show_body: + self.out(body) + def daemon_options(default_pidfile=None, default_logfile=None): return ( diff --git a/awx/lib/site-packages/celery/bin/celerybeat.py b/awx/lib/site-packages/celery/bin/beat.py similarity index 67% rename from awx/lib/site-packages/celery/bin/celerybeat.py rename to awx/lib/site-packages/celery/bin/beat.py index 8b16e00075..e0a54d7371 100644 --- a/awx/lib/site-packages/celery/bin/celerybeat.py +++ b/awx/lib/site-packages/celery/bin/beat.py @@ -17,7 +17,7 @@ The :program:`celery beat` command. Path to the schedule database. Defaults to `celerybeat-schedule`. The extension '.db' may be appended to the filename. - Default is %(default)s. + Default is {default}. .. cmdoption:: -S, --scheduler @@ -38,7 +38,6 @@ The :program:`celery beat` command. `ERROR`, `CRITICAL`, or `FATAL`. """ -from __future__ import with_statement from __future__ import absolute_import from functools import partial @@ -47,8 +46,19 @@ from celery.platforms import detached from celery.bin.base import Command, Option, daemon_options +__all__ = ['beat'] -class BeatCommand(Command): + +class beat(Command): + """Start the beat periodic task scheduler. 
+ + Examples:: + + celery beat -l info + celery beat -s /var/run/celery/beat-schedule --detach + celery beat -S djcelery.schedulers.DatabaseScheduler + + """ doc = __doc__ enable_config_from_cmdline = True supports_args = False @@ -70,17 +80,19 @@ class BeatCommand(Command): c = self.app.conf return ( - Option('--detach', action='store_true'), - Option('-s', '--schedule', default=c.CELERYBEAT_SCHEDULE_FILENAME), - Option('--max-interval', type='float'), - Option('-S', '--scheduler', dest='scheduler_cls'), - Option('-l', '--loglevel', default=c.CELERYBEAT_LOG_LEVEL), - ) + daemon_options(default_pidfile='celerybeat.pid') + (Option('--detach', action='store_true'), + Option('-s', '--schedule', + default=c.CELERYBEAT_SCHEDULE_FILENAME), + Option('--max-interval', type='float'), + Option('-S', '--scheduler', dest='scheduler_cls'), + Option('-l', '--loglevel', default=c.CELERYBEAT_LOG_LEVEL)) + + daemon_options(default_pidfile='celerybeat.pid') + + tuple(self.app.user_options['beat']) + ) -def main(): - beat = BeatCommand() - beat.execute_from_commandline() +def main(app=None): + beat(app=app).execute_from_commandline() if __name__ == '__main__': # pragma: no cover main() diff --git a/awx/lib/site-packages/celery/bin/celery.py b/awx/lib/site-packages/celery/bin/celery.py index c0dd4a8e1e..33a1e70798 100644 --- a/awx/lib/site-packages/celery/bin/celery.py +++ b/awx/lib/site-packages/celery/bin/celery.py @@ -6,218 +6,80 @@ The :program:`celery` umbrella command. .. program:: celery """ -from __future__ import absolute_import -from __future__ import with_statement +from __future__ import absolute_import, unicode_literals import anyjson -import heapq import os import sys -import warnings +from functools import partial from importlib import import_module -from pprint import pformat +from celery.five import string_t, values from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE from celery.utils import term from celery.utils import text -from celery.utils.functional import memoize -from celery.utils.imports import symbol_by_name from celery.utils.timeutils import maybe_iso8601 -from celery.bin.base import Command as BaseCommand, Option +# Cannot use relative imports here due to a Windows issue (#1111). +from celery.bin.base import Command, Option, Extensions + +# Import commands from other modules +from celery.bin.amqp import amqp +from celery.bin.beat import beat +from celery.bin.events import events +from celery.bin.graph import graph +from celery.bin.worker import worker + +__all__ = ['CeleryCommand', 'main'] HELP = """ ---- -- - - ---- Commands- -------------- --- ------------ -%(commands)s +{commands} ---- -- - - --------- -- - -------------- --- ------------ -Type '%(prog_name)s --help' for help using a specific command. +Type '{prog_name} --help' for help using a specific command. 
""" -commands = {} +MIGRATE_PROGRESS_FMT = """\ +Migrating task {state.count}/{state.strtotal}: \ +{body[task]}[{body[id]}]\ +""" + +DEBUG = os.environ.get('C_DEBUG', False) command_classes = [ ('Main', ['worker', 'events', 'beat', 'shell', 'multi', 'amqp'], 'green'), ('Remote Control', ['status', 'inspect', 'control'], 'blue'), ('Utils', ['purge', 'list', 'migrate', 'call', 'result', 'report'], None), ] - - -@memoize() -def _get_extension_classes(): - extensions = [] - command_classes.append(('Extensions', extensions, 'magenta')) - return extensions - - -def ensure_broadcast_supported(app): - if app.connection().transport.driver_type == 'sql': - raise Error('SQL broker transports does not support broadcast') - - -class Error(Exception): - - def __init__(self, reason, status=EX_FAILURE): - self.reason = reason - self.status = status - super(Error, self).__init__(reason, status) - - def __str__(self): - return self.reason - - -def command(fun, name=None, sortpri=0): - commands[name or fun.__name__] = fun - fun.sortpri = sortpri - return fun - - -def load_extension_commands(namespace='celery.commands'): - try: - from pkg_resources import iter_entry_points - except ImportError: - return - - for ep in iter_entry_points(namespace): - sym = ':'.join([ep.module_name, ep.attrs[0]]) - try: - cls = symbol_by_name(sym) - except (ImportError, SyntaxError), exc: - warnings.warn('Cannot load extension %r: %r' % (sym, exc)) - else: - heapq.heappush(_get_extension_classes(), ep.name) - command(cls, name=ep.name) - - -class Command(BaseCommand): - help = '' - args = '' - prog_name = 'celery' - show_body = True - show_reply = True - - option_list = ( - Option('--quiet', '-q', action='store_true'), - Option('--no-color', '-C', action='store_true', default=None), +if DEBUG: # pragma: no cover + command_classes.append( + ('Debug', ['graph'], 'red'), ) - def __init__(self, app=None, no_color=False, stdout=sys.stdout, - stderr=sys.stderr, show_reply=True): - super(Command, self).__init__(app=app) - self.colored = term.colored(enabled=not no_color) - self.stdout = stdout - self.stderr = stderr - self.quiet = False - if show_reply is not None: - self.show_reply = show_reply - def __call__(self, *args, **kwargs): - try: - ret = self.run(*args, **kwargs) - except Error, exc: - self.error(self.colored.red('Error: %s' % exc)) - return exc.status - - return ret if ret is not None else EX_OK - - def show_help(self, command): - self.run_from_argv(self.prog_name, [command, '--help']) - return EX_USAGE - - def error(self, s): - self.out(s, fh=self.stderr) - - def out(self, s, fh=None): - s = str(s) - if not s.endswith('\n'): - s += '\n' - (fh or self.stdout).write(s) - - def run_from_argv(self, prog_name, argv): - self.prog_name = prog_name - self.command = argv[0] - self.arglist = argv[1:] - self.parser = self.create_parser(self.prog_name, self.command) - options, args = self.prepare_args( - *self.parser.parse_args(self.arglist)) - self.colored = term.colored(enabled=not options['no_color']) - self.quiet = options.get('quiet', False) - self.show_body = options.get('show_body', True) - return self(*args, **options) - - def usage(self, command): - return '%%prog %s [options] %s' % (command, self.args) - - def prettify_list(self, n): - c = self.colored - if not n: - return '- empty -' - return '\n'.join(str(c.reset(c.white('*'), ' %s' % (item, ))) - for item in n) - - def prettify_dict_ok_error(self, n): - c = self.colored - try: - return (c.green('OK'), - text.indent(self.prettify(n['ok'])[1], 4)) - except KeyError: - pass 
- return (c.red('ERROR'), - text.indent(self.prettify(n['error'])[1], 4)) - - def say_remote_command_reply(self, replies): - c = self.colored - node = iter(replies).next() # <-- take first. - reply = replies[node] - status, preply = self.prettify(reply) - self.say_chat('->', c.cyan(node, ': ') + status, - text.indent(preply, 4) if self.show_reply else '') - - def prettify(self, n): - OK = str(self.colored.green('OK')) - if isinstance(n, list): - return OK, self.prettify_list(n) - if isinstance(n, dict): - if 'ok' in n or 'error' in n: - return self.prettify_dict_ok_error(n) - if isinstance(n, basestring): - return OK, unicode(n) - return OK, pformat(n) - - def say_chat(self, direction, title, body=''): - c = self.colored - if direction == '<-' and self.quiet: - return - dirstr = not self.quiet and c.bold(c.white(direction), ' ') or '' - self.out(c.reset(dirstr, title)) - if body and self.show_body: - self.out(body) - - @property - def description(self): - return self.__doc__ +def determine_exit_status(ret): + if isinstance(ret, int): + return ret + return EX_OK if ret else EX_FAILURE -class Delegate(Command): - - def __init__(self, *args, **kwargs): - super(Delegate, self).__init__(*args, **kwargs) - - self.target = symbol_by_name(self.Command)(app=self.app) - self.args = self.target.args - - def get_options(self): - return self.option_list + self.target.get_options() - - def create_parser(self, prog_name, command): - parser = super(Delegate, self).create_parser(prog_name, command) - return self.target.prepare_parser(parser) - - def run(self, *args, **kwargs): - self.target.check_args(args) - return self.target.run(*args, **kwargs) +def main(argv=None): + # Fix for setuptools generated scripts, so that it will + # work with multiprocessing fork emulation. + # (see multiprocessing.forking.get_preparation_data()) + try: + if __name__ != '__main__': # pragma: no cover + sys.modules['__main__'] = sys.modules[__name__] + cmd = CeleryCommand() + cmd.maybe_patch_concurrency() + from billiard import freeze_support + freeze_support() + cmd.execute_from_commandline(argv) + except KeyboardInterrupt: + pass class multi(Command): @@ -227,85 +89,11 @@ class multi(Command): def get_options(self): return () - def run_from_argv(self, prog_name, argv): - from celery.bin.celeryd_multi import MultiTool - return MultiTool().execute_from_commandline(argv, prog_name) -multi = command(multi) - - -class worker(Delegate): - """Start worker instance. - - Examples:: - - celery worker --app=proj -l info - celery worker -A proj -l info -Q hipri,lopri - - celery worker -A proj --concurrency=4 - celery worker -A proj --concurrency=1000 -P eventlet - - celery worker --autoscale=10,0 - """ - Command = 'celery.bin.celeryd:WorkerCommand' -worker = command(worker, sortpri=01) - - -class events(Delegate): - """Event-stream utilities. - - Commands:: - - celery events --app=proj - start graphical monitor (requires curses) - celery events -d --app=proj - dump events to screen. - celery events -b amqp:// - celery events -C [options] - run snapshot camera. - - Examples:: - - celery events - celery events -d - celery events -C mod.attr -F 1.0 --detach --maxrate=100/m -l info - """ - Command = 'celery.bin.celeryev:EvCommand' -events = command(events, sortpri=10) - - -class beat(Delegate): - """Start the celerybeat periodic task scheduler. 
- - Examples:: - - celery beat -l info - celery beat -s /var/run/celerybeat/schedule --detach - celery beat -S djcelery.schedulers.DatabaseScheduler - - """ - Command = 'celery.bin.celerybeat:BeatCommand' -beat = command(beat, sortpri=20) - - -class amqp(Delegate): - """AMQP Administration Shell. - - Also works for non-amqp transports. - - Examples:: - - celery amqp - start shell mode - celery amqp help - show list of commands - - celery amqp exchange.delete name - celery amqp queue.delete queue - celery amqp queue.delete queue yes yes - - """ - Command = 'celery.bin.camqadm:AMQPAdminCommand' -amqp = command(amqp, sortpri=30) + def run_from_argv(self, prog_name, argv, command=None): + from celery.bin.multi import MultiTool + return MultiTool().execute_from_commandline( + [command] + argv, prog_name, + ) class list_(Command): @@ -323,10 +111,9 @@ class list_(Command): try: bindings = management.get_bindings() except NotImplementedError: - raise Error('Your transport cannot list bindings.') + raise self.Error('Your transport cannot list bindings.') - fmt = lambda q, e, r: self.out('%s %s %s' % (q.ljust(28), - e.ljust(28), r)) + fmt = lambda q, e, r: self.out('{0:<28} {1:<28} {2}'.format(q, e, r)) fmt('Queue', 'Exchange', 'Routing Key') fmt('-' * 16, '-' * 16, '-' * 16) for b in bindings: @@ -336,14 +123,15 @@ class list_(Command): topics = {'bindings': self.list_bindings} available = ', '.join(topics) if not what: - raise Error('You must specify what to list (%s)' % available) + raise self.UsageError( + 'You must specify one of {0}'.format(available)) if what not in topics: - raise Error('unknown topic %r (choose one of: %s)' % ( - what, available)) + raise self.UsageError( + 'unknown topic {0!r} (choose one of: {1})'.format( + what, available)) with self.app.connection() as conn: self.app.amqp.TaskConsumer(conn).declare() topics[what](conn.manager) -list_ = command(list_, 'list') class call(Command): @@ -371,12 +159,12 @@ class call(Command): def run(self, name, *_, **kw): # Positional args. args = kw.get('args') or () - if isinstance(args, basestring): + if isinstance(args, string_t): args = anyjson.loads(args) # Keyword args. kwargs = kw.get('kwargs') or {} - if isinstance(kwargs, basestring): + if isinstance(kwargs, string_t): kwargs = anyjson.loads(kwargs) # Expires can be int/float. @@ -399,7 +187,6 @@ class call(Command): eta=maybe_iso8601(kw.get('eta')), expires=expires) self.out(res.id) -call = command(call) class purge(Command): @@ -408,17 +195,17 @@ class purge(Command): WARNING: There is no undo operation for this command. """ + fmt_purged = 'Purged {mnum} {messages} from {qnum} known task {queues}.' + fmt_empty = 'No messages purged from {qnum} {queues}' + def run(self, *args, **kwargs): queues = len(self.app.amqp.queues) - messages_removed = self.app.control.purge() - if messages_removed: - self.out('Purged %s %s from %s known task %s.' 
% ( - messages_removed, text.pluralize(messages_removed, 'message'), - queues, text.pluralize(queues, 'queue'))) - else: - self.out('No messages purged from %s known %s' % ( - queues, text.pluralize(queues, 'queue'))) -purge = command(purge) + messages = self.app.control.purge() + fmt = self.fmt_purged if messages else self.fmt_empty + self.out(fmt.format( + mnum=messages, qnum=queues, + messages=text.pluralize(messages, 'message'), + queues=text.pluralize(queues, 'queue'))) class result(Command): @@ -450,8 +237,7 @@ class result(Command): value = result.traceback else: value = result.get() - self.out(self.prettify(value)[1]) -result = command(result) + self.out(self.pretty(value)[1]) class _RemoteControl(Command): @@ -464,6 +250,11 @@ class _RemoteControl(Command): Option('--destination', '-d', help='Comma separated list of destination node names.')) + def __init__(self, *args, **kwargs): + self.show_body = kwargs.pop('show_body', True) + self.show_reply = kwargs.pop('show_reply', True) + super(_RemoteControl, self).__init__(*args, **kwargs) + @classmethod def get_command_info(self, command, indent=0, prefix='', color=None, help=False): @@ -475,8 +266,9 @@ class _RemoteControl(Command): # see if it uses args. meth = getattr(self, command) return text.join([ - '|' + text.indent('%s%s %s' % (prefix, color(command), - meth.__doc__), indent), help, + '|' + text.indent('{0}{1} {2}'.format( + prefix, color(command), meth.__doc__), indent), + help, ]) except AttributeError: @@ -499,7 +291,7 @@ class _RemoteControl(Command): ]) def usage(self, command): - return '%%prog %s [options] %s [arg1 .. argN]' % ( + return '%prog {0} [options] {1} [arg1 .. argN]'.format( command, self.args) def call(self, *args, **kwargs): @@ -507,46 +299,39 @@ class _RemoteControl(Command): def run(self, *args, **kwargs): if not args: - raise Error('Missing %s method. See --help' % self.name) + raise self.UsageError( + 'Missing {0.name} method. See --help'.format(self)) return self.do_call_method(args, **kwargs) def do_call_method(self, args, **kwargs): method = args[0] if method == 'help': - raise Error("Did you mean '%s --help'?" 
% self.name) + raise self.Error("Did you mean '{0.name} --help'?".format(self)) if method not in self.choices: - raise Error('Unknown %s method %s' % (self.name, method)) + raise self.UsageError( + 'Unknown {0.name} method {1}'.format(self, method)) - ensure_broadcast_supported(self.app) + if self.app.connection().transport.driver_type == 'sql': + raise self.Error('Broadcast not supported by SQL broker transport') destination = kwargs.get('destination') timeout = kwargs.get('timeout') or self.choices[method][0] - if destination and isinstance(destination, basestring): - destination = [v.strip() for v in destination.split(',')] + if destination and isinstance(destination, string_t): + destination = [dest.strip() for dest in destination.split(',')] try: handler = getattr(self, method) except AttributeError: handler = self.call - # XXX Python 2.5 does not support X(*args, foo=1) - kwargs = {"timeout": timeout, "destination": destination, - "callback": self.say_remote_command_reply} - replies = handler(method, *args[1:], **kwargs) + replies = handler(method, *args[1:], timeout=timeout, + destination=destination, + callback=self.say_remote_command_reply) if not replies: - raise Error('No nodes replied within time constraint.', - status=EX_UNAVAILABLE) + raise self.Error('No nodes replied within time constraint.', + status=EX_UNAVAILABLE) return replies - def say(self, direction, title, body=''): - c = self.colored - if direction == '<-' and self.quiet: - return - dirstr = not self.quiet and c.bold(c.white(direction), ' ') or '' - self.out(c.reset(dirstr, title)) - if body and self.show_body: - self.out(body) - class inspect(_RemoteControl): """Inspect the worker at runtime. @@ -556,8 +341,8 @@ class inspect(_RemoteControl): Examples:: celery inspect active --timeout=5 - celery inspect scheduled -d worker1.example.com - celery inspect revoked -d w1.e.com,w2.e.com + celery inspect scheduled -d worker1@example.com + celery inspect revoked -d w1@e.com,w2@e.com """ name = 'inspect' @@ -570,13 +355,23 @@ class inspect(_RemoteControl): 'revoked': (1.0, 'dump of revoked task ids'), 'registered': (1.0, 'dump of registered tasks'), 'ping': (0.2, 'ping worker(s)'), - 'report': (1.0, 'get bugreport info') + 'clock': (1.0, 'get value of logical clock'), + 'conf': (1.0, 'dump worker configuration'), + 'report': (1.0, 'get bugreport info'), + 'memsample': (1.0, 'sample memory (requires psutil)'), + 'memdump': (1.0, 'dump memory samples (requires psutil)'), + 'objgraph': (60.0, 'create object graph (requires objgraph)'), } def call(self, method, *args, **options): i = self.app.control.inspect(**options) return getattr(i, method)(*args) -inspect = command(inspect) + + def objgraph(self, type_='Request', *args, **kwargs): + return self.call('objgraph', type_) + + def conf(self, with_defaults=False, *args, **kwargs): + return self.call('conf', with_defaults=with_defaults) class control(_RemoteControl): @@ -587,7 +382,7 @@ class control(_RemoteControl): Examples:: celery control enable_events --timeout=5 - celery control -d worker1.example.com enable_events + celery control -d worker1@example.com enable_events celery control -d w1.e.com,w2.e.com enable_events celery control -d w1.e.com add_consumer queue_name @@ -612,21 +407,19 @@ class control(_RemoteControl): } def call(self, method, *args, **options): - # XXX Python 2.5 doesn't support X(*args, reply=True, **kwargs) - return getattr(self.app.control, method)( - *args, **dict(options, reply=True)) + return getattr(self.app.control, method)(*args, reply=True, 
**options)

     def pool_grow(self, method, n=1, **kwargs):
         """[N=1]"""
-        return self.call(method, n, **kwargs)
+        return self.call(method, int(n), **kwargs)

     def pool_shrink(self, method, n=1, **kwargs):
         """[N=1]"""
-        return self.call(method, n, **kwargs)
+        return self.call(method, int(n), **kwargs)

     def autoscale(self, method, max=None, min=None, **kwargs):
         """[max] [min]"""
-        return self.call(method, max, min, **kwargs)
+        return self.call(method, int(max), int(min), **kwargs)

     def rate_limit(self, method, task_name, rate_limit, **kwargs):
         """<task_name> <rate_limit> (e.g. 5/s | 5/m | 5/h)"""
@@ -634,7 +427,8 @@ class control(_RemoteControl):

     def time_limit(self, method, task_name, soft, hard=None, **kwargs):
         """<task_name> <soft_secs> [hard_secs]"""
-        return self.call(method, task_name, soft, hard, reply=True, **kwargs)
+        return self.call(method, task_name,
+                         float(soft), float(hard), reply=True, **kwargs)

     def add_consumer(self, method, queue, exchange=None,
                      exchange_type='direct', routing_key=None, **kwargs):
@@ -645,7 +439,6 @@ class control(_RemoteControl):
         return self.call(method, queue, exchange, exchange_type,
                          routing_key, reply=True, **kwargs)

     def cancel_consumer(self, method, queue, **kwargs):
         """<queue>"""
         return self.call(method, queue, reply=True, **kwargs)
-control = command(control)
@@ -653,20 +446,20 @@ class status(Command):
     option_list = inspect.option_list

     def run(self, *args, **kwargs):
-        replies = inspect(
+        I = inspect(
             app=self.app,
             no_color=kwargs.get('no_color', False),
             stdout=self.stdout, stderr=self.stderr,
-            show_reply=False).run(
-            'ping', **dict(kwargs, quiet=True, show_body=False))
+            show_reply=False, show_body=False, quiet=True,
+        )
+        replies = I.run('ping', **kwargs)
         if not replies:
-            raise Error('No nodes replied within time constraint',
-                        status=EX_UNAVAILABLE)
+            raise self.Error('No nodes replied within time constraint',
+                             status=EX_UNAVAILABLE)
         nodecount = len(replies)
         if not kwargs.get('quiet', False):
-            self.out('\n%s %s online.' % (nodecount,
-                                          text.pluralize(nodecount, 'node')))
-status = command(status)
+            self.out('\n{0} {1} online.'.format(
+                nodecount, text.pluralize(nodecount, 'node')))
@@ -695,22 +488,19 @@ class migrate(Command):
         Option('--forever', '-F', action='store_true',
                help='Continually migrate tasks until killed.'),
     )
+    progress_fmt = MIGRATE_PROGRESS_FMT

     def on_migrate_task(self, state, body, message):
-        self.out('Migrating task %s/%s: %s[%s]' % (
-            state.count, state.strtotal, body['task'], body['id']))
+        self.out(self.progress_fmt.format(state=state, body=body))

-    def run(self, *args, **kwargs):
-        if len(args) != 2:
-            return self.show_help('migrate')
+    def run(self, source, destination, **kwargs):
         from kombu import Connection
         from celery.contrib.migrate import migrate_tasks

-        migrate_tasks(Connection(args[0]),
-                      Connection(args[1]),
+        migrate_tasks(Connection(source),
+                      Connection(destination),
                       callback=self.on_migrate_task,
                       **kwargs)
-migrate = command(migrate)
@@ -723,18 +513,6 @@ class shell(Command):  # pragma: no cover
     """Start shell session with convenient access to celery symbols.

     The following symbols will be added to the main globals:

         - celery:  the current application.
         - chord, group, chain, chunks,
           xmap, xstarmap subtask, Task
         - all registered tasks.

-    Example Session:
-
-    ..
code-block:: bash - - $ celery shell - - >>> celery - - >>> add - <@task: tasks.add> - >>> add.delay(2, 2) - """ option_list = Command.option_list + ( Option('--ipython', '-I', @@ -776,7 +554,7 @@ class shell(Command): # pragma: no cover if not without_tasks: self.locals.update(dict( - (task.__name__, task) for task in self.app.tasks.itervalues() + (task.__name__, task) for task in values(self.app.tasks) if not task.name.startswith('celery.')), ) @@ -826,22 +604,19 @@ class shell(Command): # pragma: no cover import bpython bpython.embed(self.locals) -shell = command(shell) - class help(Command): """Show help screen and exit.""" def usage(self, command): - return '%%prog [options] %s' % (self.args, ) + return '%prog [options] {0.args}'.format(self) def run(self, *args, **kwargs): self.parser.print_help() - self.out(HELP % {'prog_name': self.prog_name, - 'commands': CeleryCommand.list_commands()}) + self.out(HELP.format(prog_name=self.prog_name, + commands=CeleryCommand.list_commands())) return EX_USAGE -help = command(help) class report(Command): @@ -850,14 +625,39 @@ class report(Command): def run(self, *args, **kwargs): self.out(self.app.bugreport()) return EX_OK -report = command(report) -class CeleryCommand(BaseCommand): - commands = commands +class CeleryCommand(Command): + namespace = 'celery' + ext_fmt = '{self.namespace}.commands' + commands = { + 'amqp': amqp, + 'beat': beat, + 'call': call, + 'control': control, + 'events': events, + 'graph': graph, + 'help': help, + 'inspect': inspect, + 'list': list_, + 'migrate': migrate, + 'multi': multi, + 'purge': purge, + 'report': report, + 'result': result, + 'shell': shell, + 'status': status, + 'worker': worker, + + } enable_config_from_cmdline = True prog_name = 'celery' + @classmethod + def register_command(cls, fun, name=None): + cls.commands[name or fun.__name__] = fun + return fun + def execute(self, command, argv=None): try: cls = self.commands[command] @@ -865,9 +665,26 @@ class CeleryCommand(BaseCommand): cls, argv = self.commands['help'], ['help'] cls = self.commands.get(command) or self.commands['help'] try: - return cls(app=self.app).run_from_argv(self.prog_name, argv) - except Error: - return self.execute('help', argv) + return cls( + app=self.app, on_error=self.on_error, + on_usage_error=partial(self.on_usage_error, command=command), + ).run_from_argv(self.prog_name, argv[1:], command=argv[0]) + except self.UsageError as exc: + self.on_usage_error(exc) + return exc.status + except self.Error as exc: + self.on_error(exc) + return exc.status + + def on_usage_error(self, exc, command=None): + if command: + helps = '{self.prog_name} {command} --help' + else: + helps = '{self.prog_name} --help' + self.error(self.colored.magenta("Error: {0}".format(exc))) + self.error("""Please try '{0}'""".format(helps.format( + self=self, command=command, + ))) def remove_options_at_beginning(self, argv, index=0): if argv: @@ -882,8 +699,13 @@ class CeleryCommand(BaseCommand): index += 1 return [] + def prepare_prog_name(self, name): + if name == '__main__.py': + return sys.modules['__main__'].__file__ + return name + def handle_argv(self, prog_name, argv): - self.prog_name = prog_name + self.prog_name = self.prepare_prog_name(prog_name) argv = self.remove_options_at_beginning(argv) _, argv = self.prepare_args(None, argv) try: @@ -906,12 +728,13 @@ class CeleryCommand(BaseCommand): def get_command_info(self, command, indent=0, color=None): colored = term.colored().names[color] if color else lambda x: x obj = self.commands[command] + cmd = 
'celery {0}'.format(colored(command)) if obj.leaf: - return '|' + text.indent('celery %s' % colored(command), indent) + return '|' + text.indent(cmd, indent) return text.join([ ' ', - '|' + text.indent('celery %s --help' % colored(command), indent), - obj.list_commands(indent, 'celery %s' % command, colored), + '|' + text.indent('{0} --help'.format(cmd), indent), + obj.list_commands(indent, 'celery {0}'.format(command), colored), ]) @classmethod @@ -920,7 +743,7 @@ class CeleryCommand(BaseCommand): ret = [] for cls, commands, color in command_classes: ret.extend([ - text.indent('+ %s: ' % white(cls), indent), + text.indent('+ {0}: '.format(white(cls)), indent), '\n'.join(self.get_command_info(command, indent + 4, color) for command in commands), '' @@ -928,35 +751,26 @@ class CeleryCommand(BaseCommand): return '\n'.join(ret).strip() def with_pool_option(self, argv): - if len(argv) > 1 and argv[1] == 'worker': + if len(argv) > 1 and 'worker' in argv[0:3]: # this command supports custom pools # that may have to be loaded as early as possible. return (['-P'], ['--pool']) def on_concurrency_setup(self): - load_extension_commands() + self.load_extension_commands() + + def load_extension_commands(self): + names = Extensions(self.ext_fmt.format(self=self), + self.register_command).load() + if names: + command_classes.append(('Extensions', names, 'magenta')) -def determine_exit_status(ret): - if isinstance(ret, int): - return ret - return EX_OK if ret else EX_FAILURE - - -def main(argv=None): - # Fix for setuptools generated scripts, so that it will - # work with multiprocessing fork emulation. - # (see multiprocessing.forking.get_preparation_data()) - try: - if __name__ != '__main__': # pragma: no cover - sys.modules['__main__'] = sys.modules[__name__] - cmd = CeleryCommand() - cmd.maybe_patch_concurrency() - from billiard import freeze_support - freeze_support() - cmd.execute_from_commandline(argv) - except KeyboardInterrupt: - pass +def command(*args, **kwargs): + """Deprecated: Use classmethod :meth:`CeleryCommand.register_command` + instead.""" + _register = CeleryCommand.register_command + return _register(args[0]) if args else _register if __name__ == '__main__': # pragma: no cover diff --git a/awx/lib/site-packages/celery/bin/celeryctl.py b/awx/lib/site-packages/celery/bin/celeryctl.py deleted file mode 100644 index b653924ebd..0000000000 --- a/awx/lib/site-packages/celery/bin/celeryctl.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.bin.celeryctl - ~~~~~~~~~~~~~~~~~~~~ - - Now replaced by the :program:`celery` command. - -""" -from __future__ import absolute_import - -from celery.bin.celery import ( # noqa - CeleryCommand as celeryctl, Command, main, -) - -if __name__ == '__main__': # pragma: no cover - main() diff --git a/awx/lib/site-packages/celery/bin/celeryd_detach.py b/awx/lib/site-packages/celery/bin/celeryd_detach.py index f2462952ff..1db2ff041d 100644 --- a/awx/lib/site-packages/celery/bin/celeryd_detach.py +++ b/awx/lib/site-packages/celery/bin/celeryd_detach.py @@ -3,7 +3,7 @@ celery.bin.celeryd_detach ~~~~~~~~~~~~~~~~~~~~~~~~~ - Program used to daemonize celeryd. 
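Note: with celeryctl gone, everything is consolidated into the single `celery` umbrella command, and third-party subcommands are registered through the CeleryCommand.register_command classmethod; the module-level command() helper survives only as a deprecated shim that forwards to it. A minimal sketch of programmatic registration follows; the `uptime` command and its output are invented for illustration, and only register_command() and the Command base class come from the code above:

    # Hypothetical example: `uptime` is an invented subcommand name.
    from celery.bin.base import Command
    from celery.bin.celery import CeleryCommand

    class uptime(Command):
        """Illustrative subcommand that prints a fixed message."""

        def run(self, *args, **kwargs):
            self.out('worker fleet reachable')  # self.out comes from Command

    CeleryCommand.register_command(uptime, name='uptime')

A distribution can do the same declaratively by exposing the class under the `celery.commands` setuptools entry-point namespace; load_extension_commands() scans that namespace at start-up and groups the results under an 'Extensions' section of the help screen.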
+ Program used to daemonize the worker Using :func:`os.execv` because forking and multiprocessing leads to weird issues (it was a long time ago now, but it @@ -11,7 +11,6 @@ """ from __future__ import absolute_import -from __future__ import with_statement import celery import os @@ -24,8 +23,12 @@ from celery.utils.log import get_logger from celery.bin.base import daemon_options, Option +__all__ = ['detached_celeryd', 'detach'] + logger = get_logger(__name__) +C_FAKEFORK = os.environ.get('C_FAKEFORK') + OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + ( Option('--fake', default=False, action='store_true', dest='fake', @@ -34,13 +37,16 @@ OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + ( def detach(path, argv, logfile=None, pidfile=None, uid=None, - gid=None, umask=0, working_directory=None, fake=False, ): + gid=None, umask=0, working_directory=None, fake=False, app=None): + fake = 1 if C_FAKEFORK else fake with detached(logfile, pidfile, uid, gid, umask, working_directory, fake): try: os.execv(path, [path] + argv) except Exception: - from celery import current_app - current_app.log.setup_logging_subsystem('ERROR', logfile) + if app is None: + from celery import current_app + app = current_app + app.log.setup_logging_subsystem('ERROR', logfile) logger.critical("Can't exec %r", ' '.join([path] + argv), exc_info=True) return EX_FAILURE @@ -74,9 +80,9 @@ class PartialOptionParser(OptionParser): nargs = option.nargs if len(rargs) < nargs: if nargs == 1: - self.error('%s option requires an argument' % opt) + self.error('{0} requires an argument'.format(opt)) else: - self.error('%s option requires %d arguments' % ( + self.error('{0} requires {1} arguments'.format( opt, nargs)) elif nargs == 1: value = rargs.pop(0) @@ -85,7 +91,7 @@ class PartialOptionParser(OptionParser): del rargs[0:nargs] elif had_explicit_value: - self.error('%s option does not take a value' % opt) + self.error('{0} option does not take a value'.format(opt)) else: value = None option.process(opt, value, values, self) @@ -106,11 +112,17 @@ class detached_celeryd(object): option_list = OPTION_LIST usage = '%prog [options] [celeryd options]' version = celery.VERSION_BANNER - description = ('Detaches Celery worker nodes. See `celeryd --help` ' + description = ('Detaches Celery worker nodes. 
See `celery worker --help` ' 'for the list of supported worker arguments.') command = sys.executable execv_path = sys.executable - execv_argv = ['-m', 'celery.bin.celeryd'] + if sys.version_info < (2, 7): # does not support pkg/__main__.py + execv_argv = ['-m', 'celery.__main__', 'worker'] + else: + execv_argv = ['-m', 'celery', 'worker'] + + def __init__(self, app=None): + self.app = app def Parser(self, prog_name): return PartialOptionParser(prog=prog_name, @@ -123,9 +135,9 @@ class detached_celeryd(object): parser = self.Parser(prog_name) options, values = parser.parse_args(argv) if options.logfile: - parser.leftovers.append('--logfile=%s' % (options.logfile, )) + parser.leftovers.append('--logfile={0}'.format(options.logfile)) if options.pidfile: - parser.leftovers.append('--pidfile=%s' % (options.pidfile, )) + parser.leftovers.append('--pidfile={0}'.format(options.pidfile)) return options, values, parser.leftovers def execute_from_commandline(self, argv=None): @@ -142,13 +154,15 @@ class detached_celeryd(object): config.append(arg) prog_name = os.path.basename(argv[0]) options, values, leftovers = self.parse_options(prog_name, argv[1:]) - sys.exit(detach(path=self.execv_path, - argv=self.execv_argv + leftovers + config, - **vars(options))) + sys.exit(detach( + app=self.app, path=self.execv_path, + argv=self.execv_argv + leftovers + config, + **vars(options) + )) -def main(): - detached_celeryd().execute_from_commandline() +def main(app=None): + detached_celeryd(app).execute_from_commandline() if __name__ == '__main__': # pragma: no cover main() diff --git a/awx/lib/site-packages/celery/bin/celeryev.py b/awx/lib/site-packages/celery/bin/events.py similarity index 70% rename from awx/lib/site-packages/celery/bin/celeryev.py rename to awx/lib/site-packages/celery/bin/events.py index 89408496b6..f0142ffdc9 100644 --- a/awx/lib/site-packages/celery/bin/celeryev.py +++ b/awx/lib/site-packages/celery/bin/events.py @@ -36,7 +36,6 @@ The :program:`celery events` command. """ from __future__ import absolute_import -from __future__ import with_statement import sys @@ -45,13 +44,33 @@ from functools import partial from celery.platforms import detached, set_process_title, strargv from celery.bin.base import Command, Option, daemon_options +__all__ = ['events'] -class EvCommand(Command): + +class events(Command): + """Event-stream utilities. + + Commands:: + + celery events --app=proj + start graphical monitor (requires curses) + celery events -d --app=proj + dump events to screen. + celery events -b amqp:// + celery events -C [options] + run snapshot camera. 
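Note: since events is now an ordinary Command subclass rather than the bespoke EvCommand entry point, it can also be embedded and driven from Python. A minimal sketch, assuming an invented broker URL; based on the run() signature shown just after this note, run(dump=True) should behave like `celery events --dump`, but treat this as illustrative rather than definitive:

    # Illustrative only: the broker URL and app are stand-ins.
    from celery import Celery
    from celery.bin.events import events

    app = Celery(broker='amqp://guest@localhost//')

    ev = events(app=app)
    ev.run(dump=True)  # roughly `celery events --dump`; blocks and streams events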
+ + Examples:: + + celery events + celery events -d + celery events -C mod.attr -F 1.0 --detach --maxrate=100/m -l info + """ doc = __doc__ supports_args = False def run(self, dump=False, camera=None, frequency=1.0, maxrate=None, - loglevel='INFO', logfile=None, prog_name='celeryev', + loglevel='INFO', logfile=None, prog_name='celery events', pidfile=None, uid=None, gid=None, umask=None, working_directory=None, detach=False, **kwargs): self.prog_name = prog_name @@ -94,23 +113,26 @@ class EvCommand(Command): return cam() def set_process_status(self, prog, info=''): - prog = '%s:%s' % (self.prog_name, prog) - info = '%s %s' % (info, strargv(sys.argv)) + prog = '{0}:{1}'.format(self.prog_name, prog) + info = '{0} {1}'.format(info, strargv(sys.argv)) return set_process_title(prog, info=info) def get_options(self): return ( - Option('-d', '--dump', action='store_true'), - Option('-c', '--camera'), - Option('--detach', action='store_true'), - Option('-F', '--frequency', '--freq', type='float', default=1.0), - Option('-r', '--maxrate'), - Option('-l', '--loglevel', default='INFO'), - ) + daemon_options(default_pidfile='celeryev.pid') + (Option('-d', '--dump', action='store_true'), + Option('-c', '--camera'), + Option('--detach', action='store_true'), + Option('-F', '--frequency', '--freq', + type='float', default=1.0), + Option('-r', '--maxrate'), + Option('-l', '--loglevel', default='INFO')) + + daemon_options(default_pidfile='celeryev.pid') + + tuple(self.app.user_options['events']) + ) def main(): - ev = EvCommand() + ev = events() ev.execute_from_commandline() if __name__ == '__main__': # pragma: no cover diff --git a/awx/lib/site-packages/celery/bin/graph.py b/awx/lib/site-packages/celery/bin/graph.py new file mode 100644 index 0000000000..5d5847672b --- /dev/null +++ b/awx/lib/site-packages/celery/bin/graph.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- +""" + +The :program:`celery graph` command. + +.. program:: celery graph + +""" +from __future__ import absolute_import, unicode_literals + +from operator import itemgetter + +from celery.datastructures import DependencyGraph, GraphFormatter +from celery.five import items + +from .base import Command + +__all__ = ['graph'] + + +class graph(Command): + args = """ [arguments] + ..... bootsteps [worker] [consumer] + ..... 
workers [enumerate] + """ + + def run(self, what=None, *args, **kwargs): + map = {'bootsteps': self.bootsteps, 'workers': self.workers} + if not what: + raise self.UsageError('missing type') + elif what not in map: + raise self.Error('no graph {0} in {1}'.format(what, '|'.join(map))) + return map[what](*args, **kwargs) + + def bootsteps(self, *args, **kwargs): + worker = self.app.WorkController() + include = set(arg.lower() for arg in args or ['worker', 'consumer']) + if 'worker' in include: + graph = worker.blueprint.graph + if 'consumer' in include: + worker.blueprint.connect_with(worker.consumer.blueprint) + else: + graph = worker.consumer.blueprint.graph + graph.to_dot(self.stdout) + + def workers(self, *args, **kwargs): + + def simplearg(arg): + return maybe_list(itemgetter(0, 2)(arg.partition(':'))) + + def maybe_list(l, sep=','): + return (l[0], l[1].split(sep) if sep in l[1] else l[1]) + + args = dict(simplearg(arg) for arg in args) + generic = 'generic' in args + + def generic_label(node): + return '{0} ({1}://)'.format(type(node).__name__, + node._label.split('://')[0]) + + class Node(object): + force_label = None + scheme = {} + + def __init__(self, label, pos=None): + self._label = label + self.pos = pos + + def label(self): + return self._label + + def __str__(self): + return self.label() + + class Thread(Node): + scheme = {'fillcolor': 'lightcyan4', 'fontcolor': 'yellow', + 'shape': 'oval', 'fontsize': 10, 'width': 0.3, + 'color': 'black'} + + def __init__(self, label, **kwargs): + self._label = 'thr-{0}'.format(next(tids)) + self.real_label = label + self.pos = 0 + + class Formatter(GraphFormatter): + + def label(self, obj): + return obj and obj.label() + + def node(self, obj): + scheme = dict(obj.scheme) if obj.pos else obj.scheme + if isinstance(obj, Thread): + scheme['label'] = obj.real_label + return self.draw_node( + obj, dict(self.node_scheme, **scheme), + ) + + def terminal_node(self, obj): + return self.draw_node( + obj, dict(self.term_scheme, **obj.scheme), + ) + + def edge(self, a, b, **attrs): + if isinstance(a, Thread): + attrs.update(arrowhead='none', arrowtail='tee') + return self.draw_edge(a, b, self.edge_scheme, attrs) + + def subscript(n): + S = {'0': '₀', '1': '₁', '2': '₂', '3': '₃', '4': '₄', + '5': '₅', '6': '₆', '7': '₇', '8': '₈', '9': '₉'} + return ''.join([S[i] for i in str(n)]) + + class Worker(Node): + pass + + class Backend(Node): + scheme = {'shape': 'folder', 'width': 2, + 'height': 1, 'color': 'black', + 'fillcolor': 'peachpuff3', 'color': 'peachpuff4'} + + def label(self): + return generic_label(self) if generic else self._label + + class Broker(Node): + scheme = {'shape': 'circle', 'fillcolor': 'cadetblue3', + 'color': 'cadetblue4', 'height': 1} + + def label(self): + return generic_label(self) if generic else self._label + + from itertools import count + tids = count(1) + Wmax = int(args.get('wmax', 4) or 0) + Tmax = int(args.get('tmax', 3) or 0) + + def maybe_abbr(l, name, max=Wmax): + size = len(l) + abbr = max and size > max + if 'enumerate' in args: + l = ['{0}{1}'.format(name, subscript(i + 1)) + for i, obj in enumerate(l)] + if abbr: + l = l[0:max - 1] + [l[size - 1]] + l[max - 2] = '{0}⎨…{1}⎬'.format( + name[0], subscript(size - (max - 1))) + return l + + try: + workers = args['nodes'] + threads = args.get('threads') or [] + except KeyError: + replies = self.app.control.inspect().stats() + workers, threads = [], [] + for worker, reply in items(replies): + workers.append(worker) + threads.append(reply['pool']['max-concurrency']) + + 
wlen = len(workers) + backend = args.get('backend', self.app.conf.CELERY_RESULT_BACKEND) + threads_for = {} + workers = maybe_abbr(workers, 'Worker') + if Wmax and wlen > Wmax: + threads = threads[0:3] + [threads[-1]] + for i, threads in enumerate(threads): + threads_for[workers[i]] = maybe_abbr( + list(range(int(threads))), 'P', Tmax, + ) + + broker = Broker(args.get('broker', self.app.connection().as_uri())) + backend = Backend(backend) if backend else None + graph = DependencyGraph(formatter=Formatter()) + graph.add_arc(broker) + if backend: + graph.add_arc(backend) + curworker = [0] + for i, worker in enumerate(workers): + worker = Worker(worker, pos=i) + graph.add_arc(worker) + graph.add_edge(worker, broker) + if backend: + graph.add_edge(worker, backend) + threads = threads_for.get(worker._label) + if threads: + for thread in threads: + thread = Thread(thread) + graph.add_arc(thread) + graph.add_edge(thread, worker) + + curworker[0] += 1 + + graph.to_dot(self.stdout) diff --git a/awx/lib/site-packages/celery/bin/celeryd_multi.py b/awx/lib/site-packages/celery/bin/multi.py similarity index 63% rename from awx/lib/site-packages/celery/bin/celeryd_multi.py rename to awx/lib/site-packages/celery/bin/multi.py index 5d9344c866..e2a583f3b8 100644 --- a/awx/lib/site-packages/celery/bin/celeryd_multi.py +++ b/awx/lib/site-packages/celery/bin/multi.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- """ -.. program:: celeryd-multi +.. program:: celery multi Examples ======== @@ -9,120 +9,128 @@ Examples .. code-block:: bash # Single worker with explicit name and events enabled. - $ celeryd-multi start Leslie -E + $ celery multi start Leslie -E # Pidfiles and logfiles are stored in the current directory # by default. Use --pidfile and --logfile argument to change - # this. The abbreviation %n will be expanded to the current + # this. The abbreviation %N will be expanded to the current # node name. - $ celeryd-multi start Leslie -E --pidfile=/var/run/celery/%n.pid - --logfile=/var/log/celery/%n.log + $ celery multi start Leslie -E --pidfile=/var/run/celery/%N.pid + --logfile=/var/log/celery/%N.log # You need to add the same arguments when you restart, # as these are not persisted anywhere. - $ celeryd-multi restart Leslie -E --pidfile=/var/run/celery/%n.pid - --logfile=/var/run/celery/%n.log + $ celery multi restart Leslie -E --pidfile=/var/run/celery/%N.pid + --logfile=/var/run/celery/%N.log # To stop the node, you need to specify the same pidfile. 
-    $ celeryd-multi stop Leslie --pidfile=/var/run/celery/%n.pid
+    $ celery multi stop Leslie --pidfile=/var/run/celery/%N.pid

     # 3 workers, with 3 processes each
-    $ celeryd-multi start 3 -c 3
-    celeryd -n celeryd1.myhost -c 3
-    celeryd -n celeryd2.myhost -c 3
-    celeryd -n celeryd3.myhost -c 3
+    $ celery multi start 3 -c 3
+    celery worker -n celery1@myhost -c 3
+    celery worker -n celery2@myhost -c 3
+    celery worker -n celery3@myhost -c 3

     # start 3 named workers
-    $ celeryd-multi start image video data -c 3
-    celeryd -n image.myhost -c 3
-    celeryd -n video.myhost -c 3
-    celeryd -n data.myhost -c 3
+    $ celery multi start image video data -c 3
+    celery worker -n image@myhost -c 3
+    celery worker -n video@myhost -c 3
+    celery worker -n data@myhost -c 3

     # specify custom hostname
-    $ celeryd-multi start 2 -n worker.example.com -c 3
-    celeryd -n celeryd1.worker.example.com -c 3
-    celeryd -n celeryd2.worker.example.com -c 3
+    $ celery multi start 2 --hostname=worker.example.com -c 3
+    celery worker -n celery1@worker.example.com -c 3
+    celery worker -n celery2@worker.example.com -c 3
+
+    # specify fully qualified nodenames
+    $ celery multi start foo@worker.example.com bar@worker.example.com -c 3

     # Advanced example starting 10 workers in the background:
     #   * Three of the workers process the images and video queue
     #   * Two of the workers process the data queue with loglevel DEBUG
     #   * the rest process the 'default' queue.
-    $ celeryd-multi start 10 -l INFO -Q:1-3 images,video -Q:4,5 data
+    $ celery multi start 10 -l INFO -Q:1-3 images,video -Q:4,5 data
         -Q default -L:4,5 DEBUG

     # You can show the commands necessary to start the workers with
     # the 'show' command:
-    $ celeryd-multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data
+    $ celery multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data
         -Q default -L:4,5 DEBUG

-    # Additional options are added to each celeryd,
+    # Additional options are added to each celery worker command,
     # but you can also modify the options for ranges of, or specific workers

     # 3 workers: Two with 3 processes, and one with 10 processes.
- $ celeryd-multi start 3 -c 3 -c:1 10 - celeryd -n celeryd1.myhost -c 10 - celeryd -n celeryd2.myhost -c 3 - celeryd -n celeryd3.myhost -c 3 + $ celery multi start 3 -c 3 -c:1 10 + celery worker -n celery1@myhost -c 10 + celery worker -n celery2@myhost -c 3 + celery worker -n celery3@myhost -c 3 # can also specify options for named workers - $ celeryd-multi start image video data -c 3 -c:image 10 - celeryd -n image.myhost -c 10 - celeryd -n video.myhost -c 3 - celeryd -n data.myhost -c 3 + $ celery multi start image video data -c 3 -c:image 10 + celery worker -n image@myhost -c 10 + celery worker -n video@myhost -c 3 + celery worker -n data@myhost -c 3 # ranges and lists of workers in options is also allowed: # (-c:1-3 can also be written as -c:1,2,3) - $ celeryd-multi start 5 -c 3 -c:1-3 10 - celeryd -n celeryd1.myhost -c 10 - celeryd -n celeryd2.myhost -c 10 - celeryd -n celeryd3.myhost -c 10 - celeryd -n celeryd4.myhost -c 3 - celeryd -n celeryd5.myhost -c 3 + $ celery multi start 5 -c 3 -c:1-3 10 + celery worker -n celery1@myhost -c 10 + celery worker -n celery2@myhost -c 10 + celery worker -n celery3@myhost -c 10 + celery worker -n celery4@myhost -c 3 + celery worker -n celery5@myhost -c 3 # lists also works with named workers - $ celeryd-multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10 - celeryd -n foo.myhost -c 10 - celeryd -n bar.myhost -c 10 - celeryd -n baz.myhost -c 10 - celeryd -n xuzzy.myhost -c 3 + $ celery multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10 + celery worker -n foo@myhost -c 10 + celery worker -n bar@myhost -c 10 + celery worker -n baz@myhost -c 10 + celery worker -n xuzzy@myhost -c 3 """ -from __future__ import absolute_import +from __future__ import absolute_import, print_function import errno import os +import shlex import signal import socket import sys -from collections import defaultdict +from collections import defaultdict, namedtuple from subprocess import Popen from time import sleep from kombu.utils import cached_property +from kombu.utils.compat import OrderedDict from kombu.utils.encoding import from_utf8 from celery import VERSION_BANNER -from celery.platforms import Pidfile, shellsplit -from celery.utils import term +from celery.five import items +from celery.platforms import Pidfile, IS_WINDOWS +from celery.utils import term, nodesplit from celery.utils.text import pluralize +__all__ = ['MultiTool'] + SIGNAMES = set(sig for sig in dir(signal) if sig.startswith('SIG') and '_' not in sig) SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES) USAGE = """\ -usage: %(prog_name)s start [celeryd options] - %(prog_name)s stop [-SIG (default: -TERM)] - %(prog_name)s restart [-SIG] [celeryd options] - %(prog_name)s kill +usage: {prog_name} start [worker options] + {prog_name} stop [-SIG (default: -TERM)] + {prog_name} restart [-SIG] [worker options] + {prog_name} kill - %(prog_name)s show [celeryd options] - %(prog_name)s get hostname [-qv] [celeryd options] - %(prog_name)s names - %(prog_name)s expand template - %(prog_name)s help + {prog_name} show [worker options] + {prog_name} get hostname [-qv] [worker options] + {prog_name} names + {prog_name} expand template + {prog_name} help additional options (must appear after command name): @@ -132,11 +140,25 @@ additional options (must appear after command name): * --no-color: Don't display colors. 
""" +multi_args_t = namedtuple( + 'multi_args_t', ('name', 'argv', 'expander', 'namespace'), +) + def main(): sys.exit(MultiTool().execute_from_commandline(sys.argv)) +CELERY_EXE = 'celery' +if sys.version_info < (2, 7): + # pkg.__main__ first supported in Py2.7 + CELERY_EXE = 'celery.__main__' + + +def celery_exe(*args): + return ' '.join((CELERY_EXE, ) + args) + + class MultiTool(object): retcode = 0 # Final exit code. @@ -148,7 +170,7 @@ class MultiTool(object): self.quiet = quiet self.verbose = verbose self.no_color = no_color - self.prog_name = 'celeryd-multi' + self.prog_name = 'celery multi' self.commands = {'start': self.start, 'show': self.show, 'stop': self.stop, @@ -161,7 +183,7 @@ class MultiTool(object): 'get': self.get, 'help': self.help} - def execute_from_commandline(self, argv, cmd='celeryd'): + def execute_from_commandline(self, argv, cmd='celery worker'): argv = list(argv) # don't modify callers argv. # Reserve the --nosplash|--quiet|-q/--verbose options. @@ -183,32 +205,32 @@ class MultiTool(object): try: self.commands[argv[0]](argv[1:], cmd) except KeyError: - self.error('Invalid command: %s' % argv[0]) + self.error('Invalid command: {0}'.format(argv[0])) return self.retcode def say(self, m, newline=True): - self.fh.write('%s%s' % (m, '\n' if newline else '')) + print(m, file=self.fh, end='\n' if newline else '') def names(self, argv, cmd): p = NamespacedOptionParser(argv) self.say('\n'.join( - hostname for hostname, _, _ in multi_args(p, cmd)), + n.name for n in multi_args(p, cmd)), ) def get(self, argv, cmd): wanted = argv[0] p = NamespacedOptionParser(argv[1:]) - for name, worker, _ in multi_args(p, cmd): - if name == wanted: - self.say(' '.join(worker)) + for node in multi_args(p, cmd): + if node.name == wanted: + self.say(' '.join(node.argv)) return def show(self, argv, cmd): p = NamespacedOptionParser(argv) self.note('> Starting nodes...') self.say('\n'.join( - ' '.join(worker) for _, worker, _ in multi_args(p, cmd)), + ' '.join(n.argv) for n in multi_args(p, cmd)), ) def start(self, argv, cmd): @@ -217,25 +239,28 @@ class MultiTool(object): self.with_detacher_default_options(p) retcodes = [] self.note('> Starting nodes...') - for nodename, argv, _ in multi_args(p, cmd): - self.note('\t> %s: ' % (nodename, ), newline=False) - retcode = self.waitexec(argv) + for node in multi_args(p, cmd): + self.note('\t> {0}: '.format(node.name), newline=False) + retcode = self.waitexec(node.argv) self.note(retcode and self.FAILED or self.OK) retcodes.append(retcode) self.retcode = int(any(retcodes)) def with_detacher_default_options(self, p): - p.options.setdefault('--pidfile', 'celeryd@%n.pid') - p.options.setdefault('--logfile', 'celeryd@%n.log') - p.options.setdefault('--cmd', '-m celery.bin.celeryd_detach') + _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid') + _setdefaultopt(p.options, ['--logfile', '-f'], '%N.log') + p.options.setdefault( + '--cmd', + '-m {0}'.format(celery_exe('worker', '--detach')), + ) def signal_node(self, nodename, pid, sig): try: os.kill(pid, sig) - except OSError, exc: + except OSError as exc: if exc.errno != errno.ESRCH: raise - self.note('Could not signal %s (%s): No such process' % ( + self.note('Could not signal {0} ({1}): No such process'.format( nodename, pid)) return False return True @@ -243,7 +268,7 @@ class MultiTool(object): def node_alive(self, pid): try: os.kill(pid, 0) - except OSError, exc: + except OSError as exc: if exc.errno == errno.ESRCH: return False raise @@ -264,9 +289,8 @@ class MultiTool(object): for node in list(P): 
if node in P: nodename, _, pid = node - self.note('\t> %s: %s -> %s' % (nodename, - SIGMAP[sig][3:], - pid)) + self.note('\t> {0}: {1} -> {2}'.format( + nodename, SIGMAP[sig][3:], pid)) if not self.signal_node(nodename, pid, sig): on_down(node) @@ -274,8 +298,9 @@ class MultiTool(object): left = len(P) if left: pids = ', '.join(str(pid) for _, _, pid in P) - self.note(self.colored.blue('> Waiting for %s %s -> %s...' % ( - left, pluralize(left, 'node'), pids)), newline=False) + self.note(self.colored.blue( + '> Waiting for {0} {1} -> {2}...'.format( + left, pluralize(left, 'node'), pids)), newline=False) if retry: note_waiting() @@ -286,7 +311,7 @@ class MultiTool(object): self.note('.', newline=False) nodename, _, pid = node if not self.node_alive(pid): - self.note('\n\t> %s: %s' % (nodename, self.OK)) + self.note('\n\t> {0}: {1}'.format(nodename, self.OK)) on_down(node) note_waiting() break @@ -295,22 +320,28 @@ class MultiTool(object): self.note('') def getpids(self, p, cmd, callback=None): - pidfile_template = p.options.setdefault('--pidfile', 'celeryd@%n.pid') + _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid') nodes = [] - for nodename, argv, expander in multi_args(p, cmd): + for node in multi_args(p, cmd): + try: + pidfile_template = _getopt( + p.namespaces[node.namespace], ['--pidfile', '-p'], + ) + except KeyError: + pidfile_template = _getopt(p.options, ['--pidfile', '-p']) pid = None - pidfile = expander(pidfile_template) + pidfile = node.expander(pidfile_template) try: pid = Pidfile(pidfile).read_pid() except ValueError: pass if pid: - nodes.append((nodename, tuple(argv), pid)) + nodes.append((node.name, tuple(node.argv), pid)) else: - self.note('> %s: %s' % (nodename, self.DOWN)) + self.note('> {0.name}: {1}'.format(node, self.DOWN)) if callback: - callback(nodename, argv, pid) + callback(node.name, node.argv, pid) return nodes @@ -318,7 +349,7 @@ class MultiTool(object): self.splash() p = NamespacedOptionParser(argv) for nodename, _, pid in self.getpids(p, cmd): - self.note('Killing node %s (%s)' % (nodename, pid)) + self.note('Killing node {0} ({1})'.format(nodename, pid)) self.signal_node(nodename, pid, signal.SIGKILL) def stop(self, argv, cmd, retry=None, callback=None): @@ -341,7 +372,7 @@ class MultiTool(object): def on_node_shutdown(nodename, argv, pid): self.note(self.colored.blue( - '> Restarting node %s: ' % nodename), newline=False) + '> Restarting node {0}: '.format(nodename)), newline=False) retval = self.waitexec(argv) self.note(retval and self.FAILED or self.OK) retvals.append(retval) @@ -354,37 +385,37 @@ class MultiTool(object): p = NamespacedOptionParser(argv) self.with_detacher_default_options(p) return self._stop_nodes(p, cmd, retry=2) - stop_verify = stopwait # compat + stop_verify = stopwait # compat def expand(self, argv, cmd=None): template = argv[0] p = NamespacedOptionParser(argv[1:]) - for _, _, expander in multi_args(p, cmd): - self.say(expander(template)) + for node in multi_args(p, cmd): + self.say(node.expander(template)) def help(self, argv, cmd=None): self.say(__doc__) def usage(self): self.splash() - self.say(USAGE % {'prog_name': self.prog_name}) + self.say(USAGE.format(prog_name=self.prog_name)) def splash(self): if not self.nosplash: c = self.colored - self.note(c.cyan('celeryd-multi v%s' % VERSION_BANNER)) + self.note(c.cyan('celery multi v{0}'.format(VERSION_BANNER))) def waitexec(self, argv, path=sys.executable): args = ' '.join([path] + list(argv)) - argstr = shellsplit(from_utf8(args)) + argstr = shlex.split(from_utf8(args), 
posix=not IS_WINDOWS) pipe = Popen(argstr, env=self.env) - self.info(' %s' % ' '.join(argstr)) + self.info(' {0}'.format(' '.join(argstr))) retcode = pipe.wait() if retcode < 0: - self.note('* Child was terminated by signal %s' % (-retcode, )) + self.note('* Child was terminated by signal {0}'.format(-retcode)) return -retcode elif retcode > 0: - self.note('* Child terminated with failure code %s' % (retcode, )) + self.note('* Child terminated with errorcode {0}'.format(retcode)) return retcode def error(self, msg=None): @@ -419,7 +450,7 @@ class MultiTool(object): return str(self.colored.magenta('DOWN')) -def multi_args(p, cmd='celeryd', append='', prefix='', suffix=''): +def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''): names = p.values options = dict(p.options) passthrough = p.passthrough @@ -430,44 +461,53 @@ def multi_args(p, cmd='celeryd', append='', prefix='', suffix=''): except ValueError: pass else: - names = [str(v) for v in range(1, noderange + 1)] + names = [str(n) for n in range(1, noderange + 1)] prefix = 'celery' cmd = options.pop('--cmd', cmd) append = options.pop('--append', append) hostname = options.pop('--hostname', options.pop('-n', socket.gethostname())) prefix = options.pop('--prefix', prefix) or '' - suffix = options.pop('--suffix', suffix) or '.' + hostname + suffix = options.pop('--suffix', suffix) or hostname if suffix in ('""', "''"): suffix = '' - for ns_name, ns_opts in p.namespaces.items(): + for ns_name, ns_opts in list(items(p.namespaces)): if ',' in ns_name or (ranges and '-' in ns_name): for subns in parse_ns_range(ns_name, ranges): p.namespaces[subns].update(ns_opts) p.namespaces.pop(ns_name) for name in names: - this_name = options['-n'] = prefix + name + suffix + this_suffix = suffix + if '@' in name: + this_name = options['-n'] = name + nodename, this_suffix = nodesplit(name) + name = nodename + else: + nodename = '%s%s' % (prefix, name) + this_name = options['-n'] = '%s@%s' % (nodename, this_suffix) expand = abbreviations({'%h': this_name, - '%n': name}) + '%n': name, + '%N': nodename, + '%d': this_suffix}) argv = ([expand(cmd)] + [format_opt(opt, expand(value)) - for opt, value in p.optmerge(name, options).items()] + + for opt, value in items(p.optmerge(name, options))] + [passthrough]) if append: argv.append(expand(append)) - yield this_name, argv, expand + yield multi_args_t(this_name, argv, expand, name) class NamespacedOptionParser(object): def __init__(self, args): self.args = args - self.options = {} + self.options = OrderedDict() self.values = [] self.passthrough = '' - self.namespaces = defaultdict(lambda: {}) + self.namespaces = defaultdict(lambda: OrderedDict()) self.parse() @@ -503,7 +543,7 @@ class NamespacedOptionParser(object): def optmerge(self, ns, defaults=None): if defaults is None: defaults = self.options - return dict(defaults, **self.namespaces[ns]) + return OrderedDict(defaults, **self.namespaces[ns]) def add_option(self, name, value, short=False, ns=None): prefix = short and '-' or '--' @@ -522,8 +562,8 @@ def format_opt(opt, value): if not value: return opt if opt.startswith('--'): - return '%s=%s' % (opt, value) - return '%s %s' % (opt, value) + return '{0}={1}'.format(opt, value) + return '{0} {1}'.format(opt, value) def parse_ns_range(ns, ranges=False): @@ -531,8 +571,9 @@ def parse_ns_range(ns, ranges=False): for space in ',' in ns and ns.split(',') or [ns]: if ranges and '-' in space: start, stop = space.split('-') - x = [str(v) for v in range(int(start), int(stop) + 1)] - ret.extend(x) + 
ret.extend( + str(n) for n in range(int(start), int(stop) + 1) + ) else: ret.append(space) return ret @@ -543,8 +584,8 @@ def abbreviations(mapping): def expand(S): ret = S if S is not None: - for short, long in mapping.items(): - ret = ret.replace(short, long) + for short_opt, long_opt in items(mapping): + ret = ret.replace(short_opt, long_opt) return ret return expand @@ -563,5 +604,24 @@ def findsig(args, default=signal.SIGTERM): return getattr(signal, maybe_sig) return default + +def _getopt(d, alt): + for opt in alt: + try: + return d[opt] + except KeyError: + pass + raise KeyError(alt[0]) + + +def _setdefaultopt(d, alt, value): + for opt in alt[1:]: + try: + return d[opt] + except KeyError: + pass + return d.setdefault(alt[0], value) + + if __name__ == '__main__': # pragma: no cover main() diff --git a/awx/lib/site-packages/celery/bin/celeryd.py b/awx/lib/site-packages/celery/bin/worker.py similarity index 62% rename from awx/lib/site-packages/celery/bin/celeryd.py rename to awx/lib/site-packages/celery/bin/worker.py index 172c73239e..3c3065d901 100644 --- a/awx/lib/site-packages/celery/bin/celeryd.py +++ b/awx/lib/site-packages/celery/bin/worker.py @@ -18,7 +18,7 @@ The :program:`celery worker` command (previously known as ``celeryd``) Pool implementation: - processes (default), eventlet, gevent, solo or threads. + prefork (default), eventlet, gevent, solo or threads. .. cmdoption:: -f, --logfile @@ -31,11 +31,12 @@ The :program:`celery worker` command (previously known as ``celeryd``) .. cmdoption:: -n, --hostname - Set custom hostname, e.g. 'foo.example.com'. + Set custom hostname, e.g. 'w1.%h'. Expands: %h (hostname), + %n (name) and %d, (domain). .. cmdoption:: -B, --beat - Also run the `celerybeat` periodic task scheduler. Please note that + Also run the `celery beat` periodic task scheduler. Please note that there must only be one instance of this service. .. cmdoption:: -Q, --queues @@ -55,6 +56,10 @@ The :program:`celery worker` command (previously known as ``celeryd``) Defaults to `celerybeat-schedule`. The extension ".db" may be appended to the filename. +.. cmdoption:: -O + + Apply optimization profile. Supported: default, fair + .. cmdoption:: --scheduler Scheduler class to use. Default is celery.beat.PersistentScheduler @@ -62,13 +67,25 @@ The :program:`celery worker` command (previously known as ``celeryd``) .. cmdoption:: -S, --statedb Path to the state database. The extension '.db' may - be appended to the filename. Default: %(default)s + be appended to the filename. Default: {default} .. cmdoption:: -E, --events - Send events that can be captured by monitors like :program:`celeryev`, + Send events that can be captured by monitors like :program:`celery events`, `celerymon`, and others. +.. cmdoption:: --without-gossip + + Do not subscribe to other workers events. + +.. cmdoption:: --without-mingle + + Do not synchronize with other workers at startup. + +.. cmdoption:: --without-heartbeat + + Do not send event heartbeats. + .. cmdoption:: --purge Purges all waiting tasks before the daemon is started. @@ -118,39 +135,71 @@ from __future__ import absolute_import import sys from celery import concurrency -from celery.bin.base import Command, Option +from celery.bin.base import Command, Option, daemon_options +from celery.bin.celeryd_detach import detached_celeryd +from celery.five import string_t from celery.utils.log import LOG_LEVELS, mlevel +__all__ = ['worker', 'main'] -class WorkerCommand(Command): - doc = __doc__ # parse help from this. 
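Note: the %-abbreviations used throughout the multi examples (%h, %n, %N, %d), and by the -n/--hostname expansion documented for the worker command, are applied by the abbreviations() helper shown above as a plain search-and-replace over each option template. A self-contained sketch of the same mechanics, with invented node values:

    # Stand-alone sketch of multi's %-expansion; the values are invented.
    def abbreviations(mapping):
        def expand(s):
            if s is not None:
                for short_opt, long_opt in mapping.items():
                    s = s.replace(short_opt, long_opt)
            return s
        return expand

    expand = abbreviations({'%h': 'celery1@myhost',  # full nodename
                            '%n': '1',               # plain name part
                            '%N': 'celery1',         # name prefix
                            '%d': 'myhost'})         # domain/suffix
    print(expand('/var/run/celery/%N.pid'))  # -> /var/run/celery/celery1.pid
    print(expand('worker -n %h'))            # -> worker -n celery1@myhost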
+__MODULE_DOC__ = __doc__ + + +class worker(Command): + """Start worker instance. + + Examples:: + + celery worker --app=proj -l info + celery worker -A proj -l info -Q hipri,lopri + + celery worker -A proj --concurrency=4 + celery worker -A proj --concurrency=1000 -P eventlet + + celery worker --autoscale=10,0 + """ + doc = __MODULE_DOC__ # parse help from this too namespace = 'celeryd' enable_config_from_cmdline = True supports_args = False - def execute_from_commandline(self, argv=None): - if argv is None: - argv = list(sys.argv) - return super(WorkerCommand, self).execute_from_commandline(argv) + def run_from_argv(self, prog_name, argv=None, command=None): + command = sys.argv[0] if command is None else command + argv = sys.argv[1:] if argv is None else argv + # parse options before detaching so errors can be handled. + options, args = self.prepare_args( + *self.parse_options(prog_name, argv, command)) + self.maybe_detach([command] + argv) + return self(*args, **options) - def run(self, *args, **kwargs): - kwargs.pop('app', None) + def maybe_detach(self, argv, dopts=['-D', '--detach']): + if any(arg in argv for arg in dopts): + argv = [v for v in argv if v not in dopts] + # will never return + detached_celeryd(self.app).execute_from_commandline(argv) + raise SystemExit(0) + + def run(self, hostname=None, pool_cls=None, loglevel=None, + app=None, **kwargs): # Pools like eventlet/gevent needs to patch libs as early # as possible. - kwargs['pool_cls'] = concurrency.get_implementation( - kwargs.get('pool_cls') or self.app.conf.CELERYD_POOL) + pool_cls = (concurrency.get_implementation(pool_cls) or + self.app.conf.CELERYD_POOL) if self.app.IS_WINDOWS and kwargs.get('beat'): self.die('-B option does not work on Windows. ' - 'Please run celerybeat as a separate service.') - loglevel = kwargs.get('loglevel') + 'Please run celery beat as a separate service.') + hostname = self.simple_format(hostname) if loglevel: try: - kwargs['loglevel'] = mlevel(loglevel) + loglevel = mlevel(loglevel) except KeyError: # pragma: no cover - self.die('Unknown level %r. Please use one of %s.' % ( - loglevel, '|'.join(l for l in LOG_LEVELS - if isinstance(l, basestring)))) - return self.app.Worker(**kwargs).run() + self.die('Unknown level {0!r}. 
Please use one of {1}.'.format( + loglevel, '|'.join( + l for l in LOG_LEVELS if isinstance(l, string_t)))) + + return self.app.Worker( + hostname=hostname, pool_cls=pool_cls, loglevel=loglevel, **kwargs + ).start() def with_pool_option(self, argv): # this command support custom pools @@ -164,7 +213,6 @@ class WorkerCommand(Command): default=conf.CELERYD_CONCURRENCY, type='int'), Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'), Option('--purge', '--discard', default=False, action='store_true'), - Option('-f', '--logfile', default=conf.CELERYD_LOG_FILE), Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL), Option('-n', '--hostname'), Option('-B', '--beat', action='store_true'), @@ -182,15 +230,20 @@ class WorkerCommand(Command): Option('--maxtasksperchild', dest='max_tasks_per_child', default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'), Option('--queues', '-Q', default=[]), + Option('--exclude-queues', '-X', default=[]), Option('--include', '-I', default=[]), - Option('--pidfile'), Option('--autoscale'), Option('--autoreload', action='store_true'), Option('--no-execv', action='store_true', default=False), - ) + Option('--without-gossip', action='store_true', default=False), + Option('--without-mingle', action='store_true', default=False), + Option('--without-heartbeat', action='store_true', default=False), + Option('-O', dest='optimization'), + Option('-D', '--detach', action='store_true'), + ) + daemon_options() + tuple(self.app.user_options['worker']) -def main(): +def main(app=None): # Fix for setuptools generated scripts, so that it will # work with multiprocessing fork emulation. # (see multiprocessing.forking.get_preparation_data()) @@ -198,8 +251,7 @@ def main(): sys.modules['__main__'] = sys.modules[__name__] from billiard import freeze_support freeze_support() - worker = WorkerCommand() - worker.execute_from_commandline() + worker(app=app).execute_from_commandline() if __name__ == '__main__': # pragma: no cover diff --git a/awx/lib/site-packages/celery/bootsteps.py b/awx/lib/site-packages/celery/bootsteps.py new file mode 100644 index 0000000000..be6247abe6 --- /dev/null +++ b/awx/lib/site-packages/celery/bootsteps.py @@ -0,0 +1,420 @@ +# -*- coding: utf-8 -*- +""" + celery.bootsteps + ~~~~~~~~~~~~~~~~ + + A directed acyclic graph of reusable components. + +""" +from __future__ import absolute_import, unicode_literals + +from collections import deque +from threading import Event + +from kombu.common import ignore_errors +from kombu.utils import symbol_by_name + +from .datastructures import DependencyGraph, GraphFormatter +from .five import values, with_metaclass +from .utils.imports import instantiate, qualname +from .utils.log import get_logger +from .utils.threads import default_socket_timeout + +try: + from greenlet import GreenletExit + IGNORE_ERRORS = (GreenletExit, ) +except ImportError: # pragma: no cover + IGNORE_ERRORS = () + +__all__ = ['Blueprint', 'Step', 'StartStopStep', 'ConsumerStep'] + +#: Default socket timeout at shutdown. 
+SHUTDOWN_SOCKET_TIMEOUT = 5.0 + +#: States +RUN = 0x1 +CLOSE = 0x2 +TERMINATE = 0x3 + +logger = get_logger(__name__) +debug = logger.debug + + +def _pre(ns, fmt): + return '| {0}: {1}'.format(ns.alias, fmt) + + +def _label(s): + return s.name.rsplit('.', 1)[-1] + + +class StepFormatter(GraphFormatter): + """Graph formatter for :class:`Blueprint`.""" + + blueprint_prefix = '⧉' + conditional_prefix = '∘' + blueprint_scheme = { + 'shape': 'parallelogram', + 'color': 'slategray4', + 'fillcolor': 'slategray3', + } + + def label(self, step): + return step and '{0}{1}'.format( + self._get_prefix(step), + (step.label or _label(step)).encode('utf-8', 'ignore'), + ) + + def _get_prefix(self, step): + if step.last: + return self.blueprint_prefix + if step.conditional: + return self.conditional_prefix + return '' + + def node(self, obj, **attrs): + scheme = self.blueprint_scheme if obj.last else self.node_scheme + return self.draw_node(obj, scheme, attrs) + + def edge(self, a, b, **attrs): + if a.last: + attrs.update(arrowhead='none', color='darkseagreen3') + return self.draw_edge(a, b, self.edge_scheme, attrs) + + +class Blueprint(object): + """Blueprint containing bootsteps that can be applied to objects. + + :keyword steps: List of steps. + :keyword name: Set explicit name for this blueprint. + :keyword app: Set the Celery app for this blueprint. + :keyword on_start: Optional callback applied after blueprint start. + :keyword on_close: Optional callback applied before blueprint close. + :keyword on_stopped: Optional callback applied after blueprint stopped. + + """ + GraphFormatter = StepFormatter + + name = None + state = None + started = 0 + default_steps = set() + state_to_name = { + 0: 'initializing', + RUN: 'running', + CLOSE: 'closing', + TERMINATE: 'terminating', + } + + def __init__(self, steps=None, name=None, app=None, + on_start=None, on_close=None, on_stopped=None): + self.app = app + self.name = name or self.name or qualname(type(self)) + self.types = set(steps or []) | set(self.default_steps) + self.on_start = on_start + self.on_close = on_close + self.on_stopped = on_stopped + self.shutdown_complete = Event() + self.steps = {} + + def start(self, parent): + self.state = RUN + if self.on_start: + self.on_start() + for i, step in enumerate(s for s in parent.steps if s is not None): + self._debug('Starting %s', step.alias) + self.started = i + 1 + step.start(parent) + debug('^-- substep ok') + + def human_state(self): + return self.state_to_name[self.state or 0] + + def info(self, parent): + info = {} + for step in parent.steps: + info.update(step.info(parent) or {}) + return info + + def close(self, parent): + if self.on_close: + self.on_close() + self.send_all(parent, 'close', 'closing', reverse=False) + + def restart(self, parent, method='stop', + description='restarting', propagate=False): + self.send_all(parent, method, description, propagate=propagate) + + def send_all(self, parent, method, + description=None, reverse=True, propagate=True, args=()): + description = description or method.capitalize() + steps = reversed(parent.steps) if reverse else parent.steps + with default_socket_timeout(SHUTDOWN_SOCKET_TIMEOUT): # Issue 975 + for step in steps: + if step: + self._debug('%s %s...', + description.capitalize(), step.alias) + fun = getattr(step, method, None) + if fun: + try: + fun(parent, *args) + except Exception as exc: + if propagate: + raise + logger.error( + 'Error while %s %s: %r', + description, step.alias, exc, exc_info=1, + ) + + def stop(self, parent, close=True, 
terminate=False): + what = 'terminating' if terminate else 'stopping' + if self.state in (CLOSE, TERMINATE): + return + + if self.state != RUN or self.started != len(parent.steps): + # Not fully started, can safely exit. + self.state = TERMINATE + self.shutdown_complete.set() + return + self.close(parent) + self.state = CLOSE + + self.restart( + parent, 'terminate' if terminate else 'stop', + description=what, propagate=False, + ) + + if self.on_stopped: + self.on_stopped() + self.state = TERMINATE + self.shutdown_complete.set() + + def join(self, timeout=None): + try: + # Will only get here if running green, + # makes sure all greenthreads have exited. + self.shutdown_complete.wait(timeout=timeout) + except IGNORE_ERRORS: + pass + + def apply(self, parent, **kwargs): + """Apply the steps in this blueprint to an object. + + This will apply the ``__init__`` and ``include`` methods + of each step, with the object as argument:: + + step = Step(obj) + ... + step.include(obj) + + For :class:`StartStopStep` the services created + will also be added to the object's ``steps`` attribute. + + """ + self._debug('Preparing bootsteps.') + order = self.order = [] + steps = self.steps = self.claim_steps() + + self._debug('Building graph...') + for S in self._finalize_steps(steps): + step = S(parent, **kwargs) + steps[step.name] = step + order.append(step) + self._debug('New boot order: {%s}', + ', '.join(s.alias for s in self.order)) + for step in order: + step.include(parent) + return self + + def connect_with(self, other): + self.graph.adjacent.update(other.graph.adjacent) + self.graph.add_edge(type(other.order[0]), type(self.order[-1])) + + def __getitem__(self, name): + return self.steps[name] + + def _find_last(self): + return next((C for C in values(self.steps) if C.last), None) + + def _firstpass(self, steps): + stream = deque(step.requires for step in values(steps)) + while stream: + for node in stream.popleft(): + node = symbol_by_name(node) + if node.name not in self.steps: + steps[node.name] = node + stream.append(node.requires) + + def _finalize_steps(self, steps): + last = self._find_last() + self._firstpass(steps) + it = ((C, C.requires) for C in values(steps)) + G = self.graph = DependencyGraph( + it, formatter=self.GraphFormatter(root=last), + ) + if last: + for obj in G: + if obj != last: + G.add_edge(last, obj) + try: + return G.topsort() + except KeyError as exc: + raise KeyError('unknown bootstep: %s' % exc) + + def claim_steps(self): + return dict(self.load_step(step) for step in self._all_steps()) + + def _all_steps(self): + return self.types | self.app.steps[self.name.lower()] + + def load_step(self, step): + step = symbol_by_name(step) + return step.name, step + + def _debug(self, msg, *args): + return debug(_pre(self, msg), *args) + + @property + def alias(self): + return _label(self) + + +class StepType(type): + """Metaclass for steps.""" + + def __new__(cls, name, bases, attrs): + module = attrs.get('__module__') + qname = '{0}.{1}'.format(module, name) if module else name + attrs.update( + __qualname__=qname, + name=attrs.get('name') or qname, + requires=attrs.get('requires', ()), + ) + return super(StepType, cls).__new__(cls, name, bases, attrs) + + def __str__(self): + return self.name + + def __repr__(self): + return 'step:{0.name}{{{0.requires!r}}}'.format(self) + + +@with_metaclass(StepType) +class Step(object): + """A Bootstep.
+ + The :meth:`__init__` method is called when the step + is bound to a parent object, and can as such be used + to initialize attributes in the parent object at + parent instantiation-time. + + """ + + #: Optional step name, will use qualname if not specified. + name = None + + #: Optional short name used for graph outputs and in logs. + label = None + + #: Set this to true if the step is enabled based on some condition. + conditional = False + + #: List of other steps that must be started before this step. + #: Note that all dependencies must be in the same blueprint. + requires = () + + #: This flag is reserved for the worker's Consumer, + #: since it is required to always be started last. + #: There can only be one object marked last + #: in every blueprint. + last = False + + #: This provides the default for :meth:`include_if`. + enabled = True + + def __init__(self, parent, **kwargs): + pass + + def include_if(self, parent): + """An optional predicate that decides whether this + step should be created.""" + return self.enabled + + def instantiate(self, name, *args, **kwargs): + return instantiate(name, *args, **kwargs) + + def _should_include(self, parent): + if self.include_if(parent): + return True, self.create(parent) + return False, None + + def include(self, parent): + return self._should_include(parent)[0] + + def create(self, parent): + """Create the step.""" + pass + + def __repr__(self): + return '<step: {0.alias}>'.format(self) + + @property + def alias(self): + return self.label or _label(self) + + def info(self, obj): + pass + + +class StartStopStep(Step): + + #: Optional obj created by the :meth:`create` method. + #: This is used by :class:`StartStopStep` to keep the + #: original service object. + obj = None + + def start(self, parent): + if self.obj: + return self.obj.start() + + def stop(self, parent): + if self.obj: + return self.obj.stop() + + def close(self, parent): + pass + + def terminate(self, parent): + if self.obj: + return getattr(self.obj, 'terminate', self.obj.stop)() + + def include(self, parent): + inc, ret = self._should_include(parent) + if inc: + self.obj = ret + parent.steps.append(self) + return inc + + +class ConsumerStep(StartStopStep): + requires = ('Connection', ) + consumers = None + + def get_consumers(self, channel): + raise NotImplementedError('missing get_consumers') + + def start(self, c): + channel = c.connection.channel() + self.consumers = self.get_consumers(channel) + for consumer in self.consumers or []: + consumer.consume() + + def stop(self, c): + channels = set() + for consumer in self.consumers or []: + ignore_errors(c.connection, consumer.cancel) + if consumer.channel: + channels.add(consumer.channel) + for channel in channels: + ignore_errors(c.connection, channel.close) + shutdown = stop diff --git a/awx/lib/site-packages/celery/canvas.py b/awx/lib/site-packages/celery/canvas.py index c7011e172c..b35239e9fa 100644 --- a/awx/lib/site-packages/celery/canvas.py +++ b/awx/lib/site-packages/celery/canvas.py @@ -5,28 +5,30 @@ Composing task workflows. - Documentation for these functions are in :mod:`celery`. - You should not import from this module directly. + Documentation for some of these types is in :mod:`celery`. + You should import these from :mod:`celery` and not this module.
+ """ from __future__ import absolute_import from copy import deepcopy -from functools import partial as _partial +from functools import partial as _partial, reduce from operator import itemgetter from itertools import chain as _chain from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid from celery._state import current_app -from celery.utils.compat import chain_from_iterable -from celery.result import AsyncResult, GroupResult from celery.utils.functional import ( maybe_list, is_list, regen, chunks as _chunks, ) from celery.utils.text import truncate +__all__ = ['Signature', 'chain', 'xmap', 'xstarmap', 'chunks', + 'group', 'chord', 'signature', 'maybe_signature'] + class _getitem_property(object): """Attribute -> dict key descriptor. @@ -36,11 +38,13 @@ class _getitem_property(object): Example: - class Me(dict): - deep = defaultdict(dict) + >>> from collections import defaultdict - foo = _getitem_property('foo') - deep_thing = _getitem_property('deep.thing') + >>> class Me(dict): + ... deep = defaultdict(dict) + ... + ... foo = _getitem_property('foo') + ... deep_thing = _getitem_property('deep.thing') >>> me = Me() @@ -54,9 +58,9 @@ class _getitem_property(object): 10 >>> me.deep_thing = 42 - >>> me.deep_thinge + >>> me.deep_thing 42 - >>> me.deep: + >>> me.deep defaultdict(, {'thing': 42}) """ @@ -82,8 +86,9 @@ class Signature(dict): """Class that wraps the arguments and execution options for a single task invocation. - Used as the parts in a :class:`group` or to safely - pass tasks around as callbacks. + Used as the parts in a :class:`group` and other constructs, + or to pass tasks around as callbacks while being compatible + with serializers with a strict type subset. :param task: Either a task class/instance, or the name of a task. :keyword args: Positional arguments to apply. @@ -94,13 +99,13 @@ class Signature(dict): arguments will be ignored and the values in the dict will be used instead. 
- >>> s = subtask('tasks.add', args=(2, 2)) - >>> subtask(s) + >>> s = signature('tasks.add', args=(2, 2)) + >>> signature(s) {'task': 'tasks.add', args=(2, 2), kwargs={}, options={}} """ TYPES = {} - _type = None + _app = _type = None @classmethod def register_type(cls, subclass, name=None): @@ -108,14 +113,16 @@ class Signature(dict): return subclass @classmethod - def from_dict(self, d): + def from_dict(self, d, app=None): typ = d.get('subtask_type') if typ: - return self.TYPES[typ].from_dict(kwdict(d)) - return Signature(d) + return self.TYPES[typ].from_dict(kwdict(d), app=app) + return Signature(d, app=app) def __init__(self, task=None, args=None, kwargs=None, options=None, - type=None, subtask_type=None, immutable=False, **ex): + type=None, subtask_type=None, immutable=False, + app=None, **ex): + self._app = app init = dict.__init__ if isinstance(task, dict): @@ -137,8 +144,11 @@ class Signature(dict): immutable=immutable) def __call__(self, *partial_args, **partial_kwargs): + args, kwargs, _ = self._merge(partial_args, partial_kwargs, None) + return self.type(*args, **kwargs) + + def delay(self, *partial_args, **partial_kwargs): return self.apply_async(partial_args, partial_kwargs) - delay = __call__ def apply(self, args=(), kwargs={}, **options): """Apply this task locally.""" @@ -148,29 +158,36 @@ class Signature(dict): def _merge(self, args=(), kwargs={}, options={}): if self.immutable: - return self.args, self.kwargs, dict(self.options, **options) + return (self.args, self.kwargs, + dict(self.options, **options) if options else self.options) return (tuple(args) + tuple(self.args) if args else self.args, dict(self.kwargs, **kwargs) if kwargs else self.kwargs, dict(self.options, **options) if options else self.options) def clone(self, args=(), kwargs={}, **opts): # need to deepcopy options so origins links etc. is not modified. - args, kwargs, opts = self._merge(args, kwargs, opts) + if args or kwargs or opts: + args, kwargs, opts = self._merge(args, kwargs, opts) + else: + args, kwargs, opts = self.args, self.kwargs, self.options s = Signature.from_dict({'task': self.task, 'args': tuple(args), 'kwargs': kwargs, 'options': deepcopy(opts), 'subtask_type': self.subtask_type, - 'immutable': self.immutable}) + 'immutable': self.immutable}, app=self._app) s._type = self._type return s partial = clone - def _freeze(self, _id=None): + def freeze(self, _id=None): opts = self.options try: tid = opts['task_id'] except KeyError: tid = opts['task_id'] = _id or uuid() + if 'reply_to' not in opts: + opts['reply_to'] = self.app.oid return self.AsyncResult(tid) + _freeze = freeze def replace(self, args=None, kwargs=None, options=None): s = self.clone() @@ -184,13 +201,19 @@ class Signature(dict): def set(self, immutable=None, **options): if immutable is not None: - self.immutable = immutable + self.set_immutable(immutable) self.options.update(options) return self + def set_immutable(self, immutable): + self.immutable = immutable + def apply_async(self, args=(), kwargs={}, **options): # For callbacks: extra args are prepended to the stored args. 
- args, kwargs, options = self._merge(args, kwargs, options) + if args or kwargs or options: + args, kwargs, options = self._merge(args, kwargs, options) + else: + args, kwargs, options = self.args, self.kwargs, self.options return self._apply_async(args, kwargs, **options) def append_to_list_option(self, key, value): @@ -206,7 +229,7 @@ class Signature(dict): return self.append_to_list_option('link_error', errback) def flatten_links(self): - return list(chain_from_iterable(_chain( + return list(_chain.from_iterable(_chain( [[self]], (link.flatten_links() for link in maybe_list(self.options.get('link')) or []) @@ -214,15 +237,19 @@ class Signature(dict): def __or__(self, other): if not isinstance(self, chain) and isinstance(other, chain): - return chain((self,) + other.tasks) + return chain((self, ) + other.tasks, app=self._app) elif isinstance(other, chain): - return chain(*self.tasks + other.tasks) + return chain(*self.tasks + other.tasks, app=self._app) elif isinstance(other, Signature): if isinstance(self, chain): - return chain(*self.tasks + (other, )) - return chain(self, other) + return chain(*self.tasks + (other, ), app=self._app) + return chain(self, other, app=self._app) return NotImplemented + def __deepcopy__(self, memo): + memo[id(self)] = self + return dict(self) + def __invert__(self): return self.apply_async().get() @@ -235,26 +262,41 @@ class Signature(dict): args, kwargs, _ = self._merge(args, kwargs, {}) return reprcall(self['task'], args, kwargs) + def election(self): + type = self.type + app = type.app + tid = self.options.get('task_id') or uuid() + + with app.producer_or_acquire(None) as P: + props = type.backend.on_task_call(P, tid) + app.control.election(tid, 'task', self.clone(task_id=tid, **props), + connection=P.connection) + return type.AsyncResult(tid) + def __repr__(self): return self.reprcall() @cached_property def type(self): - return self._type or current_app.tasks[self['task']] + return self._type or self.app.tasks[self['task']] + + @cached_property + def app(self): + return self._app or current_app @cached_property def AsyncResult(self): try: return self.type.AsyncResult except KeyError: # task not registered - return AsyncResult + return self.app.AsyncResult @cached_property def _apply_async(self): try: return self.type.apply_async except KeyError: - return _partial(current_app.send_task, self['task']) + return _partial(self.app.send_task, self['task']) id = _getitem_property('options.task_id') task = _getitem_property('task') args = _getitem_property('args') @@ -264,6 +306,7 @@ class Signature(dict): immutable = _getitem_property('immutable') +@Signature.register_type class chain(Signature): def __init__(self, *tasks, **options): @@ -280,20 +323,22 @@ class chain(Signature): return self.apply_async(args, kwargs) @classmethod - def from_dict(self, d): + def from_dict(self, d, app=None): tasks = d['kwargs']['tasks'] if d['args'] and tasks: # partial args passed on to first task in chain (Issue #1057). 
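# e.g. (hypothetical tasks) reconstructing # chain(add.s(4), mul.s(10)).apply_async((2,)) # from its dict form must hand the leading 2 to the first # signature only, running add(2, 4) and feeding its result to mul.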
tasks[0]['args'] = tasks[0]._merge(d['args'])[0] - return chain(*d['kwargs']['tasks'], **kwdict(d['options'])) + return chain(*d['kwargs']['tasks'], app=app, **kwdict(d['options'])) @property def type(self): - return self._type or self.tasks[0].type.app.tasks['celery.chain'] + try: + return self._type or self.tasks[0].type.app.tasks['celery.chain'] + except KeyError: + return self.app.tasks['celery.chain'] def __repr__(self): return ' | '.join(repr(t) for t in self.tasks) -Signature.register_type(chain) class _basemap(Signature): @@ -314,28 +359,31 @@ class _basemap(Signature): ) @classmethod - def from_dict(self, d): - return chunks(*self._unpack_args(d['kwargs']), **d['options']) + def from_dict(cls, d, app=None): + return cls(*cls._unpack_args(d['kwargs']), app=app, **d['options']) +@Signature.register_type class xmap(_basemap): _task_name = 'celery.map' def __repr__(self): task, it = self._unpack_args(self.kwargs) - return '[%s(x) for x in %s]' % (task.task, truncate(repr(it), 100)) -Signature.register_type(xmap) + return '[{0}(x) for x in {1}]'.format(task.task, + truncate(repr(it), 100)) +@Signature.register_type class xstarmap(_basemap): _task_name = 'celery.starmap' def __repr__(self): task, it = self._unpack_args(self.kwargs) - return '[%s(*x) for x in %s]' % (task.task, truncate(repr(it), 100)) -Signature.register_type(xstarmap) + return '[{0}(*x) for x in {1}]'.format(task.task, + truncate(repr(it), 100)) +@Signature.register_type class chunks(Signature): _unpack_args = itemgetter('task', 'it', 'n') @@ -347,8 +395,8 @@ class chunks(Signature): ) @classmethod - def from_dict(self, d): - return chunks(*self._unpack_args(d['kwargs']), **d['options']) + def from_dict(self, d, app=None): + return chunks(*self._unpack_args(d['kwargs']), app=app, **d['options']) def apply_async(self, args=(), kwargs={}, **opts): return self.group().apply_async(args, kwargs, **opts) @@ -359,12 +407,13 @@ class chunks(Signature): def group(self): # need to evaluate generators task, it, n = self._unpack_args(self.kwargs) - return group(xstarmap(task, part) for part in _chunks(iter(it), n)) + return group((xstarmap(task, part, app=self._app) + for part in _chunks(iter(it), n)), + app=self._app) @classmethod - def apply_chunks(cls, task, it, n): - return cls(task, it, n)() -Signature.register_type(chunks) + def apply_chunks(cls, task, it, n, app=None): + return cls(task, it, n, app=app)() def _maybe_group(tasks): @@ -377,6 +426,12 @@ def _maybe_group(tasks): return tasks +def _maybe_clone(tasks, app): + return [s.clone() if isinstance(s, Signature) else signature(s, app=app) + for s in tasks] + + +@Signature.register_type class group(Signature): def __init__(self, *tasks, **options): @@ -388,43 +443,66 @@ class group(Signature): self.tasks, self.subtask_type = tasks, 'group' @classmethod - def from_dict(self, d): + def from_dict(self, d, app=None): tasks = d['kwargs']['tasks'] if d['args'] and tasks: # partial args passed on to all tasks in the group (Issue #1057). for task in tasks: task['args'] = task._merge(d['args'])[0] - return group(tasks, **kwdict(d['options'])) + return group(tasks, app=app, **kwdict(d['options'])) - def __call__(self, *partial_args, **options): - tasks = [task.clone() for task in self.tasks] + def apply_async(self, args=(), kwargs=None, **options): + tasks = _maybe_clone(self.tasks, app=self._app) if not tasks: - return + return self.freeze() # taking the app from the first task in the list, # there may be a better solution to this, e.g. 
# consolidate tasks with the same app and apply them in # batches. type = tasks[0].type.app.tasks[self['task']] - return type(*type.prepare(options, tasks, partial_args)) + return type(*type.prepare(dict(self.options, **options), + tasks, args)) - def _freeze(self, _id=None): + def set_immutable(self, immutable): + for task in self.tasks: + task.set_immutable(immutable) + + def link(self, sig): + # Simply link to first task + sig = sig.clone().set(immutable=True) + return self.tasks[0].link(sig) + + def link_error(self, sig): + sig = sig.clone().set(immutable=True) + return self.tasks[0].link_error(sig) + + def apply(self, *args, **kwargs): + if not self.tasks: + return self.freeze() # empty group returns GroupResult + return Signature.apply(self, *args, **kwargs) + + def __call__(self, *partial_args, **options): + return self.apply_async(partial_args, **options) + + def freeze(self, _id=None): opts = self.options try: - gid = opts['group'] + gid = opts['task_id'] except KeyError: - gid = opts['group'] = uuid() + gid = opts['task_id'] = uuid() new_tasks, results = [], [] for task in self.tasks: - task = maybe_subtask(task).clone() + task = maybe_signature(task, app=self._app).clone() results.append(task._freeze()) new_tasks.append(task) self.tasks = self.kwargs['tasks'] = new_tasks - return GroupResult(gid, results) + return self.app.GroupResult(gid, results) + _freeze = freeze def skew(self, start=1.0, stop=None, step=1.0): - _next_skew = fxrange(start, stop, step, repeatlast=True).next + it = fxrange(start, stop, step, repeatlast=True) for task in self.tasks: - task.set(countdown=_next_skew()) + task.set(countdown=next(it)) return self def __iter__(self): @@ -437,9 +515,8 @@ class group(Signature): def type(self): return self._type or self.tasks[0].type.app.tasks[self['task']] -Signature.register_type(group) - +@Signature.register_type class chord(Signature): def __init__(self, header, body=None, task='celery.chord', @@ -447,14 +524,14 @@ class chord(Signature): Signature.__init__( self, task, args, dict(kwargs, header=_maybe_group(header), - body=maybe_subtask(body)), **options + body=maybe_signature(body, app=self._app)), **options ) self.subtask_type = 'chord' @classmethod - def from_dict(self, d): + def from_dict(self, d, app=None): args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs'])) - return self(*args, **kwdict(d)) + return self(*args, app=app, **kwdict(d)) @staticmethod def _unpack_args(header=None, body=None, **kwargs): @@ -466,14 +543,22 @@ class chord(Signature): def type(self): return self._type or self.tasks[0].type.app.tasks['celery.chord'] - def __call__(self, body=None, **kwargs): - _chord = self.type - body = (body or self.kwargs['body']).clone() - kwargs = dict(self.kwargs, body=body, **kwargs) + def apply_async(self, args=(), kwargs={}, task_id=None, **options): + body = kwargs.get('body') or self.kwargs['body'] + kwargs = dict(self.kwargs, **kwargs) + body = body.clone(**options) + + _chord = self._type or body.type.app.tasks['celery.chord'] + if _chord.app.conf.CELERY_ALWAYS_EAGER: - return self.apply((), kwargs) - callback_id = body.options.setdefault('task_id', uuid()) - return _chord.AsyncResult(callback_id, parent=_chord(**kwargs)) + return self.apply((), kwargs, task_id=task_id, **options) + res = body.freeze(task_id) + parent = _chord(self.tasks, body, args, **options) + res.parent = parent + return res + + def __call__(self, body=None, **options): + return self.apply_async((), {'body': body} if body else {}, **options) def clone(self, *args, **kwargs): 
s = Signature.clone(self, *args, **kwargs) @@ -492,25 +577,34 @@ class chord(Signature): self.body.link_error(errback) return errback + def set_immutable(self, immutable): + # changes mutability of header only, not callback. + for task in self.tasks: + task.set_immutable(immutable) + def __repr__(self): if self.body: return self.body.reprcall(self.tasks) - return '<chord without body: %r>' % (self.tasks, ) + return '<chord without body: {0.tasks!r}>'.format(self) tasks = _getitem_property('kwargs.header') body = _getitem_property('kwargs.body') -Signature.register_type(chord) -def subtask(varies, *args, **kwargs): +def signature(varies, *args, **kwargs): if not (args or kwargs) and isinstance(varies, dict): if isinstance(varies, Signature): return varies.clone() return Signature.from_dict(varies) return Signature(varies, *args, **kwargs) +subtask = signature # XXX compat -def maybe_subtask(d): - if d is not None and isinstance(d, dict) and not isinstance(d, Signature): - return subtask(d) - return d +def maybe_signature(d, app=None): + if d is not None and isinstance(d, dict): + if not isinstance(d, Signature): + return signature(d, app=app) + if app is not None: + d._app = app + return d +maybe_subtask = maybe_signature # XXX compat diff --git a/awx/lib/site-packages/celery/concurrency/__init__.py b/awx/lib/site-packages/celery/concurrency/__init__.py index 02d222f90b..c58fdbc004 100644 --- a/awx/lib/site-packages/celery/concurrency/__init__.py +++ b/awx/lib/site-packages/celery/concurrency/__init__.py @@ -13,12 +13,15 @@ from __future__ import absolute_import # too much (e.g. for eventlet patching) from kombu.utils import symbol_by_name +__all__ = ['get_implementation'] + ALIASES = { - 'processes': 'celery.concurrency.processes:TaskPool', + 'prefork': 'celery.concurrency.prefork:TaskPool', 'eventlet': 'celery.concurrency.eventlet:TaskPool', 'gevent': 'celery.concurrency.gevent:TaskPool', 'threads': 'celery.concurrency.threads:TaskPool', 'solo': 'celery.concurrency.solo:TaskPool', + 'processes': 'celery.concurrency.prefork:TaskPool', # XXX compat alias } diff --git a/awx/lib/site-packages/celery/concurrency/asynpool.py b/awx/lib/site-packages/celery/concurrency/asynpool.py new file mode 100644 index 0000000000..ce0f38ea76 --- /dev/null +++ b/awx/lib/site-packages/celery/concurrency/asynpool.py @@ -0,0 +1,1188 @@ +# -*- coding: utf-8 -*- +""" + celery.concurrency.asynpool + ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. note:: + + This module will be moved soon, so don't use it directly. + + Non-blocking version of :class:`multiprocessing.Pool`. + + This code deals with three major challenges: + + 1) Starting up child processes and keeping them running. + 2) Sending jobs to the processes and receiving results back. + 3) Safely shutting down this system.
+ +""" +from __future__ import absolute_import + +import errno +import os +import random +import select +import socket +import struct +import sys +import time + +from collections import deque, namedtuple +from io import BytesIO +from pickle import HIGHEST_PROTOCOL +from time import sleep +from weakref import WeakValueDictionary, ref + +from amqp.utils import promise +from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined +from billiard import pool as _pool +from billiard.compat import buf_t, setblocking, isblocking +from billiard.einfo import ExceptionInfo +from billiard.queues import _SimpleQueue +from kombu.async import READ, WRITE, ERR +from kombu.serialization import pickle as _pickle +from kombu.utils import fxrange +from kombu.utils.compat import get_errno +from kombu.utils.eventio import SELECT_BAD_FD +from celery.five import Counter, items, values +from celery.utils.log import get_logger +from celery.utils.text import truncate +from celery.worker import state as worker_state + +try: + from _billiard import read as __read__ + from struct import unpack_from as _unpack_from + memoryview = memoryview + readcanbuf = True + + if sys.version_info[0] == 2 and sys.version_info < (2, 7, 6): + + def unpack_from(fmt, view, _unpack_from=_unpack_from): # noqa + return _unpack_from(fmt, view.tobytes()) # <- memoryview + else: + # unpack_from supports memoryview in 2.7.6 and 3.3+ + unpack_from = _unpack_from # noqa + +except (ImportError, NameError): # pragma: no cover + + def __read__(fd, buf, size, read=os.read): # noqa + chunk = read(fd, size) + n = len(chunk) + if n != 0: + buf.write(chunk) + return n + readcanbuf = False # noqa + + def unpack_from(fmt, iobuf, unpack=struct.unpack): # noqa + return unpack(fmt, iobuf.getvalue()) # <-- BytesIO + + +logger = get_logger(__name__) +error, debug = logger.error, logger.debug + +UNAVAIL = frozenset([errno.EAGAIN, errno.EINTR]) + +#: Constant sent by child process when started (ready to accept work) +WORKER_UP = 15 + +SCHED_STRATEGY_PREFETCH = 1 +SCHED_STRATEGY_FAIR = 4 + +SCHED_STRATEGIES = { + None: SCHED_STRATEGY_PREFETCH, + 'fair': SCHED_STRATEGY_FAIR, +} + +Ack = namedtuple('Ack', ('id', 'fd', 'payload')) + + +def gen_not_started(gen): + # gi_frame is None when generator stopped. + return gen.gi_frame and gen.gi_frame.f_lasti == -1 + + +def _get_job_writer(job): + try: + writer = job._writer + except AttributeError: + pass + else: + return writer() # is a weakref + + +def _select(readers=None, writers=None, err=None, timeout=0): + """Simple wrapper to :class:`~select.select`. + + :param readers: Set of reader fds to test if readable. + :param writers: Set of writer fds to test if writable. + :param err: Set of fds to test for error condition. + + All fd sets passed must be mutable as this function + will remove non-working fds from them, this also means + the caller must make sure there are still fds in the sets + before calling us again. + + :returns: tuple of ``(readable, writable, again)``, where + ``readable`` is a set of fds that have data available for read, + ``writable`` is a set of fds that is ready to be written to + and ``again`` is a flag that if set means the caller must + throw away the result and call us again. 
+ + """ + readers = set() if readers is None else readers + writers = set() if writers is None else writers + err = set() if err is None else err + try: + r, w, e = select.select(readers, writers, err, timeout) + if e: + r = list(set(r) | set(e)) + return r, w, 0 + except (select.error, socket.error) as exc: + if get_errno(exc) == errno.EINTR: + return [], [], 1 + elif get_errno(exc) in SELECT_BAD_FD: + for fd in readers | writers | err: + try: + select.select([fd], [], [], 0) + except (select.error, socket.error) as exc: + if get_errno(exc) not in SELECT_BAD_FD: + raise + readers.discard(fd) + writers.discard(fd) + err.discard(fd) + return [], [], 1 + else: + raise + + +class Worker(_pool.Worker): + """Pool worker process.""" + dead = False + + def on_loop_start(self, pid): + # our version sends a WORKER_UP message when the process is ready + # to accept work, this will tell the parent that the inqueue fd + # is writable. + self.outq.put((WORKER_UP, (pid, ))) + + def prepare_result(self, result): + if not isinstance(result, ExceptionInfo): + return truncate(repr(result), 46) + return result + + +class ResultHandler(_pool.ResultHandler): + """Handles messages from the pool processes.""" + + def __init__(self, *args, **kwargs): + self.fileno_to_outq = kwargs.pop('fileno_to_outq') + self.on_process_alive = kwargs.pop('on_process_alive') + super(ResultHandler, self).__init__(*args, **kwargs) + # add our custom message handler + self.state_handlers[WORKER_UP] = self.on_process_alive + + def _recv_message(self, add_reader, fd, callback, + __read__=__read__, readcanbuf=readcanbuf, + BytesIO=BytesIO, unpack_from=unpack_from, + load=_pickle.load): + Hr = Br = 0 + if readcanbuf: + buf = bytearray(4) + bufv = memoryview(buf) + else: + buf = bufv = BytesIO() + # header + assert not isblocking(fd) + + while Hr < 4: + try: + n = __read__( + fd, bufv[Hr:] if readcanbuf else bufv, 4 - Hr, + ) + except OSError as exc: + if get_errno(exc) not in UNAVAIL: + raise + yield + else: + if n == 0: + raise (OSError('End of file during message') if Hr + else EOFError()) + Hr += n + + body_size, = unpack_from('>i', bufv) + if readcanbuf: + buf = bytearray(body_size) + bufv = memoryview(buf) + else: + buf = bufv = BytesIO() + + while Br < body_size: + try: + n = __read__( + fd, bufv[Br:] if readcanbuf else bufv, body_size - Br, + ) + except OSError as exc: + if get_errno(exc) not in UNAVAIL: + raise + yield + else: + if n == 0: + raise (OSError('End of file during message') if Br + else EOFError()) + Br += n + add_reader(fd, self.handle_event, fd) + if readcanbuf: + message = load(BytesIO(bufv)) + else: + bufv.seek(0) + message = load(bufv) + if message: + callback(message) + + def _make_process_result(self, hub): + """Coroutine that reads messages from the pool processes + and calls the appropriate handler.""" + fileno_to_outq = self.fileno_to_outq + on_state_change = self.on_state_change + add_reader = hub.add_reader + hub_remove = hub.remove + recv_message = self._recv_message + + def on_result_readable(fileno): + try: + fileno_to_outq[fileno] + except KeyError: # process gone + return hub_remove(fileno) + it = recv_message(add_reader, fileno, on_state_change) + try: + next(it) + except StopIteration: + pass + except (IOError, OSError, EOFError): + hub_remove(fileno) + else: + add_reader(fileno, it) + return on_result_readable + + def register_with_event_loop(self, hub): + self.handle_event = self._make_process_result(hub) + + def handle_event(self, fileno): + raise RuntimeError('Not registered with event loop') + 
+ def on_stop_not_started(self): + """This method is always used to stop when the helper thread is not + started.""" + cache = self.cache + check_timeouts = self.check_timeouts + fileno_to_outq = self.fileno_to_outq + on_state_change = self.on_state_change + join_exited_workers = self.join_exited_workers + + # flush the processes outqueues until they have all terminated. + outqueues = set(fileno_to_outq) + while cache and outqueues and self._state != TERMINATE: + if check_timeouts is not None: + # make sure tasks with a time limit will time out. + check_timeouts() + # cannot iterate and remove at the same time + pending_remove_fd = set() + for fd in outqueues: + self._flush_outqueue( + fd, pending_remove_fd.discard, fileno_to_outq, + on_state_change, + ) + try: + join_exited_workers(shutdown=True) + except WorkersJoined: + return debug('result handler: all workers terminated') + outqueues.difference_update(pending_remove_fd) + + def _flush_outqueue(self, fd, remove, process_index, on_state_change): + try: + proc = process_index[fd] + except KeyError: + # process already found terminated + # which means its outqueue has already been processed + # by the worker lost handler. + return remove(fd) + + reader = proc.outq._reader + try: + setblocking(reader, 1) + except (OSError, IOError): + return remove(fd) + try: + if reader.poll(0): + task = reader.recv() + else: + task = None + sleep(0.5) + except (IOError, EOFError): + return remove(fd) + else: + if task: + on_state_change(task) + finally: + try: + setblocking(reader, 0) + except (OSError, IOError): + return remove(fd) + + +class AsynPool(_pool.Pool): + """Pool version that uses AIO instead of helper threads.""" + ResultHandler = ResultHandler + Worker = Worker + + def __init__(self, processes=None, synack=False, + sched_strategy=None, *args, **kwargs): + self.sched_strategy = SCHED_STRATEGIES.get(sched_strategy, + sched_strategy) + processes = self.cpu_count() if processes is None else processes + self.synack = synack + # create queue-pairs for all our processes in advance. + self._queues = dict((self.create_process_queues(), None) + for _ in range(processes)) + + # inqueue fileno -> process mapping + self._fileno_to_inq = {} + # outqueue fileno -> process mapping + self._fileno_to_outq = {} + # synqueue fileno -> process mapping + self._fileno_to_synq = {} + + # We keep track of processes that have not yet + # sent a WORKER_UP message. If a process fails to send + # this message within proc_up_timeout we terminate it + # and hope the next process will recover. + self._proc_alive_timeout = 2.0 + self._waiting_to_start = set() + + # denormalized set of all inqueues. + self._all_inqueues = set() + + # Set of fds being written to (busy) + self._active_writes = set() + + # Set of active co-routines currently writing jobs. + self._active_writers = set() + + # Set of fds that are busy (executing task) + self._busy_workers = set() + self._mark_worker_as_available = self._busy_workers.discard + + # Holds jobs waiting to be written to child processes. + self.outbound_buffer = deque() + + self.write_stats = Counter() + + super(AsynPool, self).__init__(processes, *args, **kwargs) + + for proc in self._pool: + # create initial mappings, these will be updated + # as processes are recycled, or found lost elsewhere. 
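+ # (only the outqueue/synqueue fds can be indexed at this point; + # the inqueue fd is registered later, by on_process_alive below, + # once the child's WORKER_UP message shows it is ready for work.)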
+ self._fileno_to_outq[proc.outqR_fd] = proc + self._fileno_to_synq[proc.synqW_fd] = proc + self.on_soft_timeout = self._timeout_handler.on_soft_timeout + self.on_hard_timeout = self._timeout_handler.on_hard_timeout + + def _event_process_exit(self, hub, fd): + # This method is called whenever the process sentinel is readable. + hub.remove(fd) + self.maintain_pool() + + def register_with_event_loop(self, hub): + """Registers the async pool with the current event loop.""" + self._result_handler.register_with_event_loop(hub) + self.handle_result_event = self._result_handler.handle_event + self._create_timelimit_handlers(hub) + self._create_process_handlers(hub) + self._create_write_handlers(hub) + + # Add handler for when a process exits (calls maintain_pool) + [hub.add_reader(fd, self._event_process_exit, hub, fd) + for fd in self.process_sentinels] + # Handle_result_event is called whenever one of the + # result queues are readable. + [hub.add_reader(fd, self.handle_result_event, fd) + for fd in self._fileno_to_outq] + + # Timers include calling maintain_pool at a regular interval + # to be certain processes are restarted. + for handler, interval in items(self.timers): + hub.call_repeatedly(interval, handler) + + hub.on_tick.add(self.on_poll_start) + + def _create_timelimit_handlers(self, hub, now=time.time): + """For async pool this sets up the handlers used + to implement time limits.""" + call_later = hub.call_later + trefs = self._tref_for_id = WeakValueDictionary() + + def on_timeout_set(R, soft, hard): + if soft: + trefs[R._job] = call_later( + soft, self._on_soft_timeout, R._job, soft, hard, hub, + ) + elif hard: + trefs[R._job] = call_later( + hard, self._on_hard_timeout, R._job, + ) + self.on_timeout_set = on_timeout_set + + def _discard_tref(job): + try: + tref = trefs.pop(job) + tref.cancel() + del(tref) + except (KeyError, AttributeError): + pass # out of scope + self._discard_tref = _discard_tref + + def on_timeout_cancel(R): + _discard_tref(R._job) + self.on_timeout_cancel = on_timeout_cancel + + def _on_soft_timeout(self, job, soft, hard, hub, now=time.time): + # only used by async pool. + if hard: + self._tref_for_id[job] = hub.call_at( + now() + (hard - soft), self._on_hard_timeout, job, + ) + try: + result = self._cache[job] + except KeyError: + pass # job ready + else: + self.on_soft_timeout(result) + finally: + if not hard: + # remove tref + self._discard_tref(job) + + def _on_hard_timeout(self, job): + # only used by async pool. 
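+ # (scheduled via on_timeout_set in _create_timelimit_handlers; + # delegates to the timeout-handler callback bound in __init__.)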
+ try: + result = self._cache[job] + except KeyError: + pass # job ready + else: + self.on_hard_timeout(result) + finally: + # remove tref + self._discard_tref(job) + + def on_job_ready(self, job, i, obj, inqW_fd): + self._mark_worker_as_available(inqW_fd) + + def _create_process_handlers(self, hub, READ=READ, ERR=ERR): + """For async pool this will create the handlers called + when a process is up/down, etc.""" + add_reader, hub_remove = hub.add_reader, hub.remove + cache = self._cache + all_inqueues = self._all_inqueues + fileno_to_inq = self._fileno_to_inq + fileno_to_outq = self._fileno_to_outq + fileno_to_synq = self._fileno_to_synq + busy_workers = self._busy_workers + event_process_exit = self._event_process_exit + handle_result_event = self.handle_result_event + process_flush_queues = self.process_flush_queues + waiting_to_start = self._waiting_to_start + + def verify_process_alive(proc): + if proc.exitcode is None and proc in waiting_to_start: + assert proc.outqR_fd in fileno_to_outq + assert fileno_to_outq[proc.outqR_fd] is proc + assert proc.outqR_fd in hub.readers + error('Timed out waiting for UP message from %r', proc) + os.kill(proc.pid, 9) + + def on_process_up(proc): + """Called when a process has started.""" + # If we got the same fd as a previous process then we will also + # receive jobs in the old buffer, so we need to reset the + # job._write_to and job._scheduled_for attributes used to recover + # message boundaries when processes exit. + infd = proc.inqW_fd + for job in values(cache): + if job._write_to and job._write_to.inqW_fd == infd: + job._write_to = proc + if job._scheduled_for and job._scheduled_for.inqW_fd == infd: + job._scheduled_for = proc + fileno_to_outq[proc.outqR_fd] = proc + # maintain_pool is called whenever a process exits. + add_reader( + proc.sentinel, event_process_exit, hub, proc.sentinel, + ) + + assert not isblocking(proc.outq._reader) + + # handle_result_event is called when the process's outqueue is + # readable. + add_reader(proc.outqR_fd, handle_result_event, proc.outqR_fd) + + waiting_to_start.add(proc) + hub.call_later( + self._proc_alive_timeout, verify_process_alive, proc, + ) + + self.on_process_up = on_process_up + + def _remove_from_index(obj, proc, index, callback=None): + # this removes the file descriptors for a process from + # the indices. we have to make sure we don't overwrite + # another process's fds, as the fds may be reused. + try: + fd = obj.fileno() + except (IOError, OSError): + return + + try: + if index[fd] is proc: + # fd has not been reused so we can remove it from index.
+ index.pop(fd, None) + except KeyError: + pass + else: + hub_remove(fd) + if callback is not None: + callback(fd) + return fd + + def on_process_down(proc): + """Called when a worker process exits.""" + if proc.dead: + return + process_flush_queues(proc) + _remove_from_index(proc.outq._reader, proc, fileno_to_outq) + if proc.synq: + _remove_from_index(proc.synq._writer, proc, fileno_to_synq) + inq = _remove_from_index(proc.inq._writer, proc, fileno_to_inq, + callback=all_inqueues.discard) + if inq: + busy_workers.discard(inq) + hub_remove(proc.sentinel) + self.on_process_down = on_process_down + + def _create_write_handlers(self, hub, + pack=struct.pack, dumps=_pickle.dumps, + protocol=HIGHEST_PROTOCOL): + """For async pool this creates the handlers used to write data to + child processes.""" + fileno_to_inq = self._fileno_to_inq + fileno_to_synq = self._fileno_to_synq + outbound = self.outbound_buffer + pop_message = outbound.popleft + put_message = outbound.append + all_inqueues = self._all_inqueues + active_writes = self._active_writes + active_writers = self._active_writers + busy_workers = self._busy_workers + diff = all_inqueues.difference + add_reader, add_writer = hub.add_reader, hub.add_writer + hub_add, hub_remove = hub.add, hub.remove + mark_write_fd_as_active = active_writes.add + mark_write_gen_as_active = active_writers.add + mark_worker_as_busy = busy_workers.add + write_generator_done = active_writers.discard + get_job = self._cache.__getitem__ + write_stats = self.write_stats + is_fair_strategy = self.sched_strategy == SCHED_STRATEGY_FAIR + revoked_tasks = worker_state.revoked + getpid = os.getpid + + precalc = {ACK: self._create_payload(ACK, (0, )), + NACK: self._create_payload(NACK, (0, ))} + + def _put_back(job, _time=time.time): + # puts back at the end of the queue + if job._terminated is not None or \ + job.correlation_id in revoked_tasks: + if not job._accepted: + job._ack(None, _time(), getpid(), None) + job._set_terminated(job._terminated) + else: + # XXX linear lookup, should find a better way, + # but this happens rarely and is here to protect against races. + if job not in outbound: + outbound.appendleft(job) + self._put_back = _put_back + + # called for every event loop iteration, and if there + # are messages pending this will schedule writing one message + # by registering the 'schedule_writes' function for all currently + # inactive inqueues (not already being written to) + + # consolidate means the event loop will merge them + # and call the callback once with the list of writable fds as + # argument. Using this means we minimize the risk of having + # the same fd receive every task if the pipe read buffer is not + # full. + if is_fair_strategy: + + def on_poll_start(): + if outbound and len(busy_workers) < len(all_inqueues): + #print('ALL: %r ACTIVE: %r' % (len(all_inqueues), + # len(active_writes))) + inactive = diff(active_writes) + [hub_add(fd, None, WRITE | ERR, consolidate=True) + for fd in inactive] + else: + [hub_remove(fd) for fd in diff(active_writes)] + else: + def on_poll_start(): # noqa + if outbound: + [hub_add(fd, None, WRITE | ERR, consolidate=True) + for fd in diff(active_writes)] + else: + [hub_remove(fd) for fd in diff(active_writes)] + self.on_poll_start = on_poll_start + + def on_inqueue_close(fd, proc): + # Makes sure the fd is removed from tracking when + # the connection is closed, this is essential as fds may be reused.
+ busy_workers.discard(fd) + try: + if fileno_to_inq[fd] is proc: + fileno_to_inq.pop(fd, None) + active_writes.discard(fd) + all_inqueues.discard(fd) + hub_remove(fd) + except KeyError: + pass + self.on_inqueue_close = on_inqueue_close + + def schedule_writes(ready_fds, shuffle=random.shuffle): + # Schedule write operation to ready file descriptor. + # The file descriptor is writeable, but that does not + # mean the process is currently reading from the socket. + # The socket is buffered so writeable simply means that + # the buffer can accept at least 1 byte of data. + shuffle(ready_fds) + for ready_fd in ready_fds: + if ready_fd in active_writes: + # already writing to this fd + continue + if is_fair_strategy and ready_fd in busy_workers: + # worker is already busy with another task + continue + if ready_fd not in all_inqueues: + hub_remove(ready_fd) + continue + try: + job = pop_message() + except IndexError: + # no more messages, remove all inactive fds from the hub. + # this is important since the fds are always writeable + # as long as there's 1 byte left in the buffer, and so + # this may create a spinloop where the event loop + # always wakes up. + for inqfd in diff(active_writes): + hub_remove(inqfd) + break + + else: + if not job._accepted: # job not accepted by another worker + try: + # keep track of what process the write operation + # was scheduled for. + proc = job._scheduled_for = fileno_to_inq[ready_fd] + except KeyError: + # write was scheduled for this fd but the process + # has since exited and the message must be sent to + # another process. + put_message(job) + continue + cor = _write_job(proc, ready_fd, job) + job._writer = ref(cor) + mark_write_gen_as_active(cor) + mark_write_fd_as_active(ready_fd) + mark_worker_as_busy(ready_fd) + + # Try to write immediately, in case there's an error. + try: + next(cor) + except StopIteration: + pass + except OSError as exc: + if get_errno(exc) != errno.EBADF: + raise + else: + add_writer(ready_fd, cor) + hub.consolidate_callback = schedule_writes + + def send_job(tup): + # Schedule writing job request for when one of the process + # inqueues are writable. + body = dumps(tup, protocol=protocol) + body_size = len(body) + header = pack('>I', body_size) + # index 1,0 is the job ID. + job = get_job(tup[1][0]) + job._payload = buf_t(header), buf_t(body), body_size + put_message(job) + self._quick_put = send_job + + def on_not_recovering(proc, fd, job): + error('Process inqueue damaged: %r %r' % (proc, proc.exitcode)) + if proc.exitcode is not None: + proc.terminate() + hub.remove(fd) + self._put_back(job) + + def _write_job(proc, fd, job): + # writes job to the worker process. + # Operation must complete if more than one byte of data + # was written. If the broker connection is lost + # and no data was written the operation shall be cancelled. + header, body, body_size = job._payload + errors = 0 + try: + # job result keeps track of what process the job is sent to. 
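+ # (the payload built by send_job above frames each job as a + # 4-byte big-endian length header followed by the pickled body, + # i.e. struct.pack('>I', len(body)) + body.)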
+ job._write_to = proc + send = proc.send_job_offset + + Hw = Bw = 0 + # write header + while Hw < 4: + try: + Hw += send(header, Hw) + except Exception as exc: + if get_errno(exc) not in UNAVAIL: + raise + # suspend until more data + errors += 1 + if errors > 100: + on_not_recovering(proc, fd, job) + raise StopIteration() + yield + else: + errors = 0 + + # write body + while Bw < body_size: + try: + Bw += send(body, Bw) + except Exception as exc: + if get_errno(exc) not in UNAVAIL: + raise + # suspend until more data + errors += 1 + if errors > 100: + on_not_recovering(proc, fd, job) + raise StopIteration() + yield + else: + errors = 0 + finally: + hub_remove(fd) + write_stats[proc.index] += 1 + # message written, so this fd is now available + active_writes.discard(fd) + write_generator_done(job._writer()) # is a weakref + + def send_ack(response, pid, job, fd, WRITE=WRITE, ERR=ERR): + # Only used when synack is enabled. + # Schedule writing ack response for when the fd is writeable. + msg = Ack(job, fd, precalc[response]) + callback = promise(write_generator_done) + cor = _write_ack(fd, msg, callback=callback) + mark_write_gen_as_active(cor) + mark_write_fd_as_active(fd) + callback.args = (cor, ) + add_writer(fd, cor) + self.send_ack = send_ack + + def _write_ack(fd, ack, callback=None): + # writes ack back to the worker if synack enabled. + # this operation *MUST* complete, otherwise + # the worker process will hang waiting for the ack. + header, body, body_size = ack[2] + try: + try: + proc = fileno_to_synq[fd] + except KeyError: + # process died, we can safely discard the ack at this + # point. + raise StopIteration() + send = proc.send_syn_offset + + Hw = Bw = 0 + # write header + while Hw < 4: + try: + Hw += send(header, Hw) + except Exception as exc: + if get_errno(exc) not in UNAVAIL: + raise + yield + + # write body + while Bw < body_size: + try: + Bw += send(body, Bw) + except Exception as exc: + if get_errno(exc) not in UNAVAIL: + raise + # suspend until more data + yield + finally: + if callback: + callback() + # message written, so this fd is now available + active_writes.discard(fd) + + def flush(self): + if self._state == TERMINATE: + return + # cancel all tasks that have not been accepted so that NACK is sent. + for job in values(self._cache): + if not job._accepted: + job._cancel() + + # clear the outgoing buffer as the tasks will be redelivered by + # the broker anyway. + if self.outbound_buffer: + self.outbound_buffer.clear() + + self.maintain_pool() + + try: + # ...but we must continue writing the payloads we already started + # to keep message boundaries. + # The messages may be NACK'ed later if synack is enabled. + if self._state == RUN: + # flush outgoing buffers + intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True) + owned_by = {} + for job in values(self._cache): + writer = _get_job_writer(job) + if writer is not None: + owned_by[writer] = job + + while self._active_writers: + writers = list(self._active_writers) + for gen in writers: + if (gen.__name__ == '_write_job' and + gen_not_started(gen)): + # has not started writing the job so can + # discard the task, but we must also remove + # it from the Pool._cache. + try: + job = owned_by[gen] + except KeyError: + pass + else: + # removes from Pool._cache + job.discard() + self._active_writers.discard(gen) + else: + try: + job = owned_by[gen] + except KeyError: + pass + else: + job_proc = job._write_to + if job_proc.exitcode is None: + self._flush_writer(job_proc, gen) + # workers may have exited in the meantime. 
+ self.maintain_pool() + sleep(next(intervals)) # don't busyloop + finally: + self.outbound_buffer.clear() + self._active_writers.clear() + self._active_writes.clear() + self._busy_workers.clear() + + def _flush_writer(self, proc, writer): + fds = set([proc.inq._writer]) + try: + while fds: + if proc.exitcode: + break # process exited + readable, writable, again = _select( + writers=fds, err=fds, timeout=0.5, + ) + if not again and (writable or readable): + try: + next(writer) + except (StopIteration, OSError, IOError, EOFError): + break + finally: + self._active_writers.discard(writer) + + def get_process_queues(self): + """Get queues for a new process. + + Here we will find an unused slot, as there should always + be one available when we start a new process. + """ + return next(q for q, owner in items(self._queues) + if owner is None) + + def on_grow(self, n): + """Grow the pool by ``n`` processes.""" + diff = max(self._processes - len(self._queues), 0) + if diff: + self._queues.update( + dict((self.create_process_queues(), None) for _ in range(diff)) + ) + + def on_shrink(self, n): + """Shrink the pool by ``n`` processes.""" + pass + + def create_process_queues(self): + """Creates new in, out (and optionally syn) queues, + returned as a tuple.""" + # NOTE: Pipes must be set O_NONBLOCK at creation time (the original + # fd), otherwise it will not be possible to change the flags until + # there is an actual reader/writer on the other side. + inq = _SimpleQueue(wnonblock=True) + outq = _SimpleQueue(rnonblock=True) + synq = None + assert isblocking(inq._reader) + assert not isblocking(inq._writer) + assert not isblocking(outq._reader) + assert isblocking(outq._writer) + if self.synack: + synq = _SimpleQueue(wnonblock=True) + assert isblocking(synq._reader) + assert not isblocking(synq._writer) + return inq, outq, synq + + def on_process_alive(self, pid): + """Handler called when the WORKER_UP message is received + from a child process, which marks the process as ready + to receive work.""" + try: + proc = next(w for w in self._pool if w.pid == pid) + except StopIteration: + # process already exited :( this will be handled elsewhere. + return + assert proc.inqW_fd not in self._fileno_to_inq + assert proc.inqW_fd not in self._all_inqueues + self._waiting_to_start.discard(proc) + self._fileno_to_inq[proc.inqW_fd] = proc + self._fileno_to_synq[proc.synqW_fd] = proc + self._all_inqueues.add(proc.inqW_fd) + + def on_job_process_down(self, job, pid_gone): + """Handler called for each job when the process it was assigned to + exits.""" + if job._write_to and job._write_to.exitcode: + # job was partially written + self.on_partial_read(job, job._write_to) + elif job._scheduled_for and job._scheduled_for.exitcode: + # job was only scheduled to be written to this process, + # but no data was sent so put it back on the outbound_buffer.
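+ # (_put_back, defined in _create_write_handlers above, re-queues + # the job on the outbound buffer unless it was terminated or + # revoked in the meantime, in which case it is marked terminated + # instead of being rewritten.)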
+ self._put_back(job) + + def on_job_process_lost(self, job, pid, exitcode): + """Handler called for each *started* job when the process it + was assigned to exited by mysterious means (error exitcodes and + signals)""" + self.mark_as_worker_lost(job, exitcode) + + def human_write_stats(self): + if self.write_stats is None: + return 'N/A' + vals = list(values(self.write_stats)) + total = sum(vals) + + def per(v, total): + return '{0:.2f}%'.format((float(v) / total) * 100.0 if v else 0) + + return { + 'total': total, + 'avg': per(total / len(self.write_stats) if total else 0, total), + 'all': ', '.join(per(v, total) for v in vals), + 'raw': ', '.join(map(str, vals)), + 'inqueues': { + 'total': len(self._all_inqueues), + 'active': len(self._active_writes), + } + } + + def _process_cleanup_queues(self, proc): + """Handler called to clean up a process's queues after process + exit.""" + if not proc.dead: + try: + self._queues[self._find_worker_queues(proc)] = None + except (KeyError, ValueError): + pass + + @staticmethod + def _stop_task_handler(task_handler): + """Called at shutdown to tell processes that we are shutting down.""" + for proc in task_handler.pool: + setblocking(proc.inq._writer, 1) + try: + proc.inq.put(None) + except OSError as exc: + if get_errno(exc) != errno.EBADF: + raise + + def create_result_handler(self): + return super(AsynPool, self).create_result_handler( + fileno_to_outq=self._fileno_to_outq, + on_process_alive=self.on_process_alive, + ) + + def _process_register_queues(self, proc, queues): + """Marks new ownership for ``queues`` so that the fileno indices are + updated.""" + assert queues in self._queues + b = len(self._queues) + self._queues[queues] = proc + assert b == len(self._queues) + + def _find_worker_queues(self, proc): + """Find the queues owned by ``proc``.""" + try: + return next(q for q, owner in items(self._queues) + if owner == proc) + except StopIteration: + raise ValueError(proc) + + def _setup_queues(self): + # this is only used by the original pool which uses a shared + # queue for all processes. + + # these attributes make no sense for us, but we will still + # have to initialize them. + self._inqueue = self._outqueue = \ + self._quick_put = self._quick_get = self._poll_result = None + + def process_flush_queues(self, proc): + """Flushes all queues, including the outbound buffer, so that + all tasks that have not been started will be discarded. + + In Celery this is called whenever the transport connection is lost + (consumer restart). + + """ + resq = proc.outq._reader + on_state_change = self._result_handler.on_state_change + fds = set([resq]) + while fds and not resq.closed and self._state != TERMINATE: + readable, _, again = _select(fds, None, fds, timeout=0.01) + if readable: + try: + task = resq.recv() + except (OSError, IOError, EOFError) as exc: + if get_errno(exc) not in UNAVAIL: + debug('got %r while flushing process %r', + exc, proc, exc_info=1) + break + else: + if task is None: + debug('got sentinel while flushing process %r', proc) + break + else: + on_state_change(task) + else: + break + + def on_partial_read(self, job, proc): + """Called when a job was only partially written to a child process + and it exited.""" + # worker terminated by signal: + # we cannot reuse the sockets again, because we don't know if + # the process wrote/read anything from them, and if so we cannot + # restore the message boundaries. + if not job._accepted: + # job was not acked, so find another worker to send it to.
+ self._put_back(job) + writer = _get_job_writer(job) + if writer: + self._active_writers.discard(writer) + del(writer) + + if not proc.dead: + proc.dead = True + # Replace queues to avoid reuse + before = len(self._queues) + try: + queues = self._find_worker_queues(proc) + if self.destroy_queues(queues, proc): + self._queues[self.create_process_queues()] = None + except ValueError: + pass + # Not in queue map, make sure sockets are closed. + #self.destroy_queues((proc.inq, proc.outq, proc.synq)) + assert len(self._queues) == before + + def destroy_queues(self, queues, proc): + """Destroy queues that can no longer be used, so that they + can be replaced by new sockets.""" + assert proc.exitcode is not None + self._waiting_to_start.discard(proc) + removed = 1 + try: + self._queues.pop(queues) + except KeyError: + removed = 0 + try: + self.on_inqueue_close(queues[0]._writer.fileno(), proc) + except IOError: + pass + for queue in queues: + if queue: + for sock in (queue._reader, queue._writer): + if not sock.closed: + try: + sock.close() + except (IOError, OSError): + pass + return removed + + def _create_payload(self, type_, args, + dumps=_pickle.dumps, pack=struct.pack, + protocol=HIGHEST_PROTOCOL): + body = dumps((type_, args), protocol=protocol) + size = len(body) + header = pack('>I', size) + return header, body, size + + @classmethod + def _set_result_sentinel(cls, _outqueue, _pool): + # unused + pass + + def _help_stuff_finish_args(self): + # Pool._help_stuff_finished is a classmethod so we have to use this + # trick to modify the arguments passed to it. + return (self._pool, ) + + @classmethod + def _help_stuff_finish(cls, pool): + debug( + 'removing tasks from inqueue until task handler finished', + ) + fileno_to_proc = {} + inqR = set() + for w in pool: + try: + fd = w.inq._reader.fileno() + inqR.add(fd) + fileno_to_proc[fd] = w + except IOError: + pass + while inqR: + readable, _, again = _select(inqR, timeout=0.5) + if again: + continue + if not readable: + break + for fd in readable: + fileno_to_proc[fd].inq._reader.recv() + sleep(0) + + @property + def timers(self): + return {self.maintain_pool: 5.0} diff --git a/awx/lib/site-packages/celery/concurrency/base.py b/awx/lib/site-packages/celery/concurrency/base.py index 2ab7ecba26..b2ae22608d 100644 --- a/awx/lib/site-packages/celery/concurrency/base.py +++ b/awx/lib/site-packages/celery/concurrency/base.py @@ -10,21 +10,40 @@ from __future__ import absolute_import import logging import os -import time +import sys +from billiard.einfo import ExceptionInfo +from billiard.exceptions import WorkerLostError from kombu.utils.encoding import safe_repr +from celery.five import monotonic, reraise from celery.utils import timer2 from celery.utils.log import get_logger -logger = get_logger('celery.concurrency') +__all__ = ['BasePool', 'apply_target'] + +logger = get_logger('celery.pool') def apply_target(target, args=(), kwargs={}, callback=None, - accept_callback=None, pid=None, **_): + accept_callback=None, pid=None, getpid=os.getpid, + propagate=(), monotonic=monotonic, **_): if accept_callback: - accept_callback(pid or os.getpid(), time.time()) - callback(target(*args, **kwargs)) + accept_callback(pid or getpid(), monotonic()) + try: + ret = target(*args, **kwargs) + except propagate: + raise + except Exception: + raise + except BaseException as exc: + try: + reraise(WorkerLostError, WorkerLostError(repr(exc)), + sys.exc_info()[2]) + except WorkerLostError: + callback(ExceptionInfo()) + else: + callback(ret) class BasePool(object): @@ -38,15
+57,6 @@ class BasePool(object): #: a signal handler. signal_safe = True - #: set to true if pool supports rate limits. - #: (this is here for gevent, which currently does not implement - #: the necessary timers). - rlimit_safe = True - - #: set to true if pool requires the use of a mediator - #: thread (e.g. if applying new items can block the current thread). - requires_mediator = False - #: set to true if pool uses greenlets. is_green = False @@ -56,6 +66,8 @@ class BasePool(object): #: only used by multiprocessing pool uses_semaphore = False + task_join_will_block = True + def __init__(self, limit=None, putlocks=True, forking_enable=True, callbacks_propagate=(), **options): self.limit = limit @@ -71,9 +83,15 @@ class BasePool(object): def did_start_ok(self): return True + def flush(self): + pass + def on_stop(self): pass + def register_with_event_loop(self, loop): + pass + def on_apply(self, *args, **kwargs): pass @@ -86,19 +104,16 @@ class BasePool(object): def on_hard_timeout(self, job): pass - def maybe_handle_result(self, *args): - pass - def maintain_pool(self, *args, **kwargs): pass def terminate_job(self, pid): raise NotImplementedError( - '%s does not implement kill_job' % (self.__class__, )) + '{0} does not implement kill_job'.format(type(self))) def restart(self): raise NotImplementedError( - '%s does not implement restart' % (self.__class__, )) + '{0} does not implement restart'.format(type(self))) def stop(self): self.on_stop() @@ -119,9 +134,6 @@ class BasePool(object): def on_close(self): pass - def init_callbacks(self, **kwargs): - pass - def apply_async(self, target, args=[], kwargs={}, **options): """Equivalent of the :func:`apply` built-in function. @@ -152,15 +164,3 @@ class BasePool(object): @property def num_processes(self): return self.limit - - @property - def readers(self): - return {} - - @property - def writers(self): - return {} - - @property - def timers(self): - return {} diff --git a/awx/lib/site-packages/celery/concurrency/eventlet.py b/awx/lib/site-packages/celery/concurrency/eventlet.py index 5f8d68750f..e5319a9b85 100644 --- a/awx/lib/site-packages/celery/concurrency/eventlet.py +++ b/awx/lib/site-packages/celery/concurrency/eventlet.py @@ -8,11 +8,12 @@ """ from __future__ import absolute_import -import os import sys -EVENTLET_NOPATCH = os.environ.get('EVENTLET_NOPATCH', False) -EVENTLET_DBLOCK = int(os.environ.get('EVENTLET_NOBLOCK', 0)) +from time import time + +__all__ = ['TaskPool'] + W_RACE = """\ Celery module with %s imported before eventlet patched\ """ @@ -22,23 +23,12 @@ RACE_MODS = ('billiard.', 'celery.', 'kombu.') #: Warn if we couldn't patch early enough, #: and thread/socket depending celery modules have already been loaded. 
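The apply_target() rework in base.py above changes the failure contract: exceptions named in ``propagate`` and plain ``Exception`` subclasses still bubble up to the caller, but any other ``BaseException`` raised by the target (for example ``SystemExit``) is re-raised as ``WorkerLostError`` and handed to the callback as an ``ExceptionInfo``. A minimal sketch of that behaviour, assuming the module layout vendored here (``record`` is a stand-in for the pool's real result callback, not a name from the patch)::

    # Sketch only: exercising apply_target's new error handling.
    from celery.concurrency.base import apply_target

    def record(result):
        # Receives either the return value or an ExceptionInfo.
        print('callback received: %r' % (result, ))

    def ok(x, y):
        return x + y

    def bad():
        raise SystemExit('worker going away')  # BaseException, not Exception

    apply_target(ok, args=(2, 2), callback=record)  # -> 4
    apply_target(bad, callback=record)  # -> roughly ExceptionInfo(WorkerLostError)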
for mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)): - for side in ('thread', 'threading', 'socket'): + for side in ('thread', 'threading', 'socket'): # pragma: no cover if getattr(mod, side, None): import warnings warnings.warn(RuntimeWarning(W_RACE % side)) -PATCHED = [0] -if not EVENTLET_NOPATCH and not PATCHED[0]: - PATCHED[0] += 1 - import eventlet - import eventlet.debug - eventlet.monkey_patch() - if EVENTLET_DBLOCK: - eventlet.debug.hub_blocking_detection(EVENTLET_DBLOCK) - -from time import time - from celery import signals from celery.utils import timer2 @@ -93,7 +83,7 @@ class Schedule(timer2.Schedule): @property def queue(self): - return [(g.eta, g.priority, g.entry) for g in self._queue] + return self._queue class Timer(timer2.Timer): @@ -118,9 +108,9 @@ class Timer(timer2.Timer): class TaskPool(base.BasePool): Timer = Timer - rlimit_safe = False signal_safe = False is_green = True + task_join_will_block = False def __init__(self, *args, **kwargs): from eventlet import greenthread diff --git a/awx/lib/site-packages/celery/concurrency/gevent.py b/awx/lib/site-packages/celery/concurrency/gevent.py index 881023746d..f89de92b2e 100644 --- a/awx/lib/site-packages/celery/concurrency/gevent.py +++ b/awx/lib/site-packages/celery/concurrency/gevent.py @@ -7,41 +7,30 @@ """ from __future__ import absolute_import -from __future__ import with_statement -import os - -PATCHED = [0] -if not os.environ.get('GEVENT_NOPATCH') and not PATCHED[0]: - PATCHED[0] += 1 - from gevent import monkey, version_info - monkey.patch_all() - if version_info[0] == 0: - # Signals are not working along gevent in version prior 1.0 - # and they are not monkey patch by monkey.patch_all() - from gevent import signal as _gevent_signal - _signal = __import__('signal') - _signal.signal = _gevent_signal +from time import time try: from gevent import Timeout -except ImportError: +except ImportError: # pragma: no cover Timeout = None # noqa -from time import time - from celery.utils import timer2 from .base import apply_target, BasePool +__all__ = ['TaskPool'] + def apply_timeout(target, args=(), kwargs={}, callback=None, accept_callback=None, pid=None, timeout=None, - timeout_callback=None, **rest): + timeout_callback=None, Timeout=Timeout, + apply_target=apply_target, **rest): try: with Timeout(timeout): return apply_target(target, args, kwargs, callback, - accept_callback, pid, **rest) + accept_callback, pid, + propagate=(Timeout, ), **rest) except Timeout: return timeout_callback(False, timeout) @@ -52,9 +41,7 @@ class Schedule(timer2.Schedule): from gevent.greenlet import Greenlet, GreenletExit class _Greenlet(Greenlet): - - def cancel(self): - self.kill() + cancel = Greenlet.kill self._Greenlet = _Greenlet self._GreenletExit = GreenletExit @@ -88,7 +75,7 @@ class Schedule(timer2.Schedule): @property def queue(self): - return [(g.eta, g.priority, g.entry) for g in self._queue] + return self._queue class Timer(timer2.Timer): @@ -108,8 +95,8 @@ class TaskPool(BasePool): Timer = Timer signal_safe = False - rlimit_safe = False is_green = True + task_join_will_block = False def __init__(self, *args, **kwargs): from gevent import spawn_raw diff --git a/awx/lib/site-packages/celery/concurrency/prefork.py b/awx/lib/site-packages/celery/concurrency/prefork.py new file mode 100644 index 0000000000..6b9c881c9b --- /dev/null +++ b/awx/lib/site-packages/celery/concurrency/prefork.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- +""" + celery.concurrency.prefork + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Pool implementation 
using :mod:`multiprocessing`.
+
+"""
+from __future__ import absolute_import
+
+import os
+import sys
+
+from billiard import forking_enable
+from billiard.pool import RUN, CLOSE, Pool as BlockingPool
+
+from celery import platforms
+from celery import signals
+from celery._state import set_default_app, _set_task_join_will_block
+from celery.app import trace
+from celery.concurrency.base import BasePool
+from celery.five import items
+from celery.utils.log import get_logger
+
+from .asynpool import AsynPool
+
+__all__ = ['TaskPool', 'process_initializer', 'process_destructor']
+
+#: List of signals to reset when a child process starts.
+WORKER_SIGRESET = frozenset(['SIGTERM',
+                             'SIGHUP',
+                             'SIGTTIN',
+                             'SIGTTOU',
+                             'SIGUSR1'])
+
+#: List of signals to ignore when a child process starts.
+WORKER_SIGIGNORE = frozenset(['SIGINT'])
+
+MAXTASKS_NO_BILLIARD = """\
+    maxtasksperchild enabled but billiard C extension not installed!
+    This may lead to a deadlock, so please install the billiard C extension.
+"""
+
+logger = get_logger(__name__)
+warning, debug = logger.warning, logger.debug
+
+
+def process_initializer(app, hostname):
+    """Pool child process initializer.
+
+    This will initialize a child pool process to ensure the correct
+    app instance is used and things like
+    logging work.
+
+    """
+    _set_task_join_will_block(True)
+    platforms.signals.reset(*WORKER_SIGRESET)
+    platforms.signals.ignore(*WORKER_SIGIGNORE)
+    platforms.set_mp_process_title('celeryd', hostname=hostname)
+    # This is for Windows and other platforms not supporting
+    # fork(). Note that init_worker makes sure it's only
+    # run once per process.
+    app.loader.init_worker()
+    app.loader.init_worker_process()
+    app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0),
+                  os.environ.get('CELERY_LOG_FILE') or None,
+                  bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
+                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')))
+    if os.environ.get('FORKED_BY_MULTIPROCESSING'):
+        # pool did execv after fork
+        trace.setup_worker_optimizations(app)
+    else:
+        app.set_current()
+        set_default_app(app)
+        app.finalize()
+        trace._tasks = app._tasks  # enables fast_trace_task optimization.
+    # rebuild execution handler for all tasks.
+    from celery.app.trace import build_tracer
+    for name, task in items(app.tasks):
+        task.__trace__ = build_tracer(name, task, app.loader, hostname,
+                                      app=app)
+    signals.worker_process_init.send(sender=None)
+
+
+def process_destructor(pid, exitcode):
+    """Pool child process destructor.
+
+    Dispatch the :signal:`worker_process_shutdown` signal.
+
+    """
+    signals.worker_process_shutdown.send(
+        sender=None, pid=pid, exitcode=exitcode,
+    )
+
+
+class TaskPool(BasePool):
+    """Multiprocessing Pool implementation."""
+    Pool = AsynPool
+    BlockingPool = BlockingPool
+
+    uses_semaphore = True
+    write_stats = None
+
+    def on_start(self):
+        """Run the task pool.
+
+        Will pre-fork all workers so they're ready to accept tasks.
+ + """ + if self.options.get('maxtasksperchild') and sys.platform != 'win32': + try: + from billiard.connection import Connection + Connection.send_offset + except (ImportError, AttributeError): + # billiard C extension not installed + warning(MAXTASKS_NO_BILLIARD) + + forking_enable(self.forking_enable) + Pool = (self.BlockingPool if self.options.get('threads', True) + else self.Pool) + P = self._pool = Pool(processes=self.limit, + initializer=process_initializer, + on_process_exit=process_destructor, + synack=False, + **self.options) + + # Create proxy methods + self.on_apply = P.apply_async + self.maintain_pool = P.maintain_pool + self.terminate_job = P.terminate_job + self.grow = P.grow + self.shrink = P.shrink + self.flush = getattr(P, 'flush', None) # FIXME add to billiard + self.restart = P.restart + + def did_start_ok(self): + return self._pool.did_start_ok() + + def register_with_event_loop(self, loop): + try: + reg = self._pool.register_with_event_loop + except AttributeError: + return + return reg(loop) + + def on_stop(self): + """Gracefully stop the pool.""" + if self._pool is not None and self._pool._state in (RUN, CLOSE): + self._pool.close() + self._pool.join() + self._pool = None + + def on_terminate(self): + """Force terminate the pool.""" + if self._pool is not None: + self._pool.terminate() + self._pool = None + + def on_close(self): + if self._pool is not None and self._pool._state == RUN: + self._pool.close() + + def _get_info(self): + try: + write_stats = self._pool.human_write_stats + except AttributeError: + write_stats = lambda: 'N/A' # only supported by asynpool + return { + 'max-concurrency': self.limit, + 'processes': [p.pid for p in self._pool._pool], + 'max-tasks-per-child': self._pool._maxtasksperchild or 'N/A', + 'put-guarded-by-semaphore': self.putlocks, + 'timeouts': (self._pool.soft_timeout or 0, + self._pool.timeout or 0), + 'writes': write_stats() + } + + @property + def num_processes(self): + return self._pool._processes diff --git a/awx/lib/site-packages/celery/concurrency/processes/__init__.py b/awx/lib/site-packages/celery/concurrency/processes/__init__.py deleted file mode 100644 index 799f7698ea..0000000000 --- a/awx/lib/site-packages/celery/concurrency/processes/__init__.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.concurrency.processes - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Pool implementation using :mod:`multiprocessing`. - - We use the billiard fork of multiprocessing which contains - numerous improvements. - -""" -from __future__ import absolute_import - -import os - -from billiard import forking_enable -from billiard.pool import Pool, RUN, CLOSE - -from celery import platforms -from celery import signals -from celery._state import set_default_app -from celery.concurrency.base import BasePool -from celery.task import trace - -#: List of signals to reset when a child process starts. -WORKER_SIGRESET = frozenset(['SIGTERM', - 'SIGHUP', - 'SIGTTIN', - 'SIGTTOU', - 'SIGUSR1']) - -#: List of signals to ignore when a child process starts. -WORKER_SIGIGNORE = frozenset(['SIGINT']) - - -def process_initializer(app, hostname): - """Initializes the process so it can be used to process tasks.""" - platforms.signals.reset(*WORKER_SIGRESET) - platforms.signals.ignore(*WORKER_SIGIGNORE) - platforms.set_mp_process_title('celeryd', hostname=hostname) - # This is for Windows and other platforms not supporting - # fork(). Note that init_worker makes sure it's only - # run once per process. 
- app.loader.init_worker() - app.loader.init_worker_process() - app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0)), - os.environ.get('CELERY_LOG_FILE') or None, - bool(os.environ.get('CELERY_LOG_REDIRECT', False)), - str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL'))) - if os.environ.get('FORKED_BY_MULTIPROCESSING'): - # pool did execv after fork - trace.setup_worker_optimizations(app) - else: - app.set_current() - set_default_app(app) - app.finalize() - trace._tasks = app._tasks # enables fast_trace_task optimization. - from celery.task.trace import build_tracer - for name, task in app.tasks.iteritems(): - task.__trace__ = build_tracer(name, task, app.loader, hostname) - signals.worker_process_init.send(sender=None) - - -class TaskPool(BasePool): - """Multiprocessing Pool implementation.""" - Pool = Pool - - requires_mediator = True - uses_semaphore = True - - def on_start(self): - """Run the task pool. - - Will pre-fork all workers so they're ready to accept tasks. - - """ - forking_enable(self.forking_enable) - P = self._pool = self.Pool(processes=self.limit, - initializer=process_initializer, - **self.options) - self.on_apply = P.apply_async - self.on_soft_timeout = P._timeout_handler.on_soft_timeout - self.on_hard_timeout = P._timeout_handler.on_hard_timeout - self.maintain_pool = P.maintain_pool - self.maybe_handle_result = P._result_handler.handle_event - - def did_start_ok(self): - return self._pool.did_start_ok() - - def on_stop(self): - """Gracefully stop the pool.""" - if self._pool is not None and self._pool._state in (RUN, CLOSE): - self._pool.close() - self._pool.join() - self._pool = None - - def on_terminate(self): - """Force terminate the pool.""" - if self._pool is not None: - self._pool.terminate() - self._pool = None - - def on_close(self): - if self._pool is not None and self._pool._state == RUN: - self._pool.close() - - def terminate_job(self, pid, signal=None): - return self._pool.terminate_job(pid, signal) - - def grow(self, n=1): - return self._pool.grow(n) - - def shrink(self, n=1): - return self._pool.shrink(n) - - def restart(self): - self._pool.restart() - - def _get_info(self): - return {'max-concurrency': self.limit, - 'processes': [p.pid for p in self._pool._pool], - 'max-tasks-per-child': self._pool._maxtasksperchild, - 'put-guarded-by-semaphore': self.putlocks, - 'timeouts': (self._pool.soft_timeout, self._pool.timeout)} - - def init_callbacks(self, **kwargs): - for k, v in kwargs.iteritems(): - setattr(self._pool, k, v) - - def handle_timeouts(self): - if self._pool._timeout_handler: - self._pool._timeout_handler.handle_event() - - @property - def num_processes(self): - return self._pool._processes - - @property - def readers(self): - return self._pool.readers - - @property - def writers(self): - return self._pool.writers - - @property - def timers(self): - return {self.maintain_pool: 5.0} diff --git a/awx/lib/site-packages/celery/concurrency/solo.py b/awx/lib/site-packages/celery/concurrency/solo.py index 51d47c6a29..a2dc199706 100644 --- a/awx/lib/site-packages/celery/concurrency/solo.py +++ b/awx/lib/site-packages/celery/concurrency/solo.py @@ -12,6 +12,8 @@ import os from .base import BasePool, apply_target +__all__ = ['TaskPool'] + class TaskPool(BasePool): """Solo task pool (blocking, inline, fast).""" diff --git a/awx/lib/site-packages/celery/concurrency/threads.py b/awx/lib/site-packages/celery/concurrency/threads.py index ba5c6416c2..fee901ecf3 100644 --- a/awx/lib/site-packages/celery/concurrency/threads.py +++ 
b/awx/lib/site-packages/celery/concurrency/threads.py @@ -8,10 +8,12 @@ """ from __future__ import absolute_import -from celery.utils.compat import UserDict +from celery.five import UserDict from .base import apply_target, BasePool +__all__ = ['TaskPool'] + class NullDict(UserDict): diff --git a/awx/lib/site-packages/celery/contrib/abortable.py b/awx/lib/site-packages/celery/contrib/abortable.py index f7d9b79028..37dc30d921 100644 --- a/awx/lib/site-packages/celery/contrib/abortable.py +++ b/awx/lib/site-packages/celery/contrib/abortable.py @@ -37,7 +37,7 @@ In the consumer: def run(self, **kwargs): results = [] - for x in xrange(100): + for x in range(100): # Check after every 5 loops.. if x % 5 == 0: # alternatively, check when some timer is due if self.is_aborted(**kwargs): @@ -83,9 +83,11 @@ have it block until the task is finished. """ from __future__ import absolute_import -from celery.task.base import Task +from celery import Task from celery.result import AsyncResult +__all__ = ['AbortableAsyncResult', 'AbortableTask'] + """ Task States @@ -112,7 +114,7 @@ class AbortableAsyncResult(AsyncResult): """ def is_aborted(self): - """Returns :const:`True` if the task is (being) aborted.""" + """Return :const:`True` if the task is (being) aborted.""" return self.state == ABORTED def abort(self): @@ -141,17 +143,17 @@ class AbortableTask(Task): the call evaluates to :const:`True`. """ + abstract = True - @classmethod - def AsyncResult(cls, task_id): - """Returns the accompanying AbortableAsyncResult instance.""" - return AbortableAsyncResult(task_id, backend=cls.backend) + def AsyncResult(self, task_id): + """Return the accompanying AbortableAsyncResult instance.""" + return AbortableAsyncResult(task_id, backend=self.backend) def is_aborted(self, **kwargs): """Checks against the backend whether this :class:`AbortableAsyncResult` is :const:`ABORTED`. - Always returns :const:`False` in case the `task_id` parameter + Always return :const:`False` in case the `task_id` parameter refers to a regular (non-abortable) :class:`Task`. Be aware that invoking this method will cause a hit in the diff --git a/awx/lib/site-packages/celery/contrib/batches.py b/awx/lib/site-packages/celery/contrib/batches.py index 4dd78f014e..a3feb1d284 100644 --- a/awx/lib/site-packages/celery/contrib/batches.py +++ b/awx/lib/site-packages/celery/contrib/batches.py @@ -28,7 +28,7 @@ to store it in a database. 
from collections import Counter count = Counter(request.kwargs['url'] for request in requests) for url, count in count.items(): - print('>>> Clicks: %s -> %s' % (url, count)) + print('>>> Clicks: {0} -> {1}'.format(url, count)) Then you can ask for a click to be counted by doing:: @@ -84,13 +84,15 @@ Using the API is done as follows:: from __future__ import absolute_import from itertools import count -from Queue import Empty, Queue from celery.task import Task +from celery.five import Empty, Queue from celery.utils.log import get_logger from celery.worker.job import Request from celery.utils import noop +__all__ = ['Batches'] + logger = get_logger(__name__) @@ -122,7 +124,7 @@ def apply_batches_task(task, args, loglevel, logfile): task.push_request(loglevel=loglevel, logfile=logfile) try: result = task(*args) - except Exception, exc: + except Exception as exc: result = None logger.error('Error: %r', exc, exc_info=True) finally: @@ -176,12 +178,12 @@ class Batches(Task): def __init__(self): self._buffer = Queue() - self._count = count(1).next + self._count = count(1) self._tref = None self._pool = None def run(self, requests): - raise NotImplementedError('%r must implement run(requests)' % (self, )) + raise NotImplementedError('must implement run(requests)') def Strategy(self, task, app, consumer): self._pool = consumer.pool @@ -193,7 +195,7 @@ class Batches(Task): put_buffer = self._buffer.put flush_buffer = self._do_flush - def task_message_handler(message, body, ack): + def task_message_handler(message, body, ack, reject, callbacks, **kw): request = Req(body, on_ack=ack, app=app, hostname=hostname, events=eventer, task=task, connection_errors=connection_errors, @@ -201,10 +203,11 @@ class Batches(Task): put_buffer(request) if self._tref is None: # first request starts flush timer. - self._tref = timer.apply_interval(self.flush_interval * 1000.0, - flush_buffer) + self._tref = timer.call_repeatedly( + self.flush_interval, flush_buffer, + ) - if not self._count() % self.flush_every: + if not next(self._count) % self.flush_every: flush_buffer() return task_message_handler diff --git a/awx/lib/site-packages/celery/contrib/bundles.py b/awx/lib/site-packages/celery/contrib/bundles.py deleted file mode 100644 index ded9aa7092..0000000000 --- a/awx/lib/site-packages/celery/contrib/bundles.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.contrib.bundles - ~~~~~~~~~~~~~~~~~~~~~~ - - Celery PyPI Bundles. 
- -""" -from __future__ import absolute_import - -from celery import VERSION -from bundle.extensions import Dist - - -defaults = {'author': 'Celery Project', - 'author_email': 'bundles@celeryproject.org', - 'url': 'http://celeryproject.org', - 'license': 'BSD'} -celery = Dist('celery', VERSION, **defaults) -django_celery = Dist('django-celery', VERSION, **defaults) -flask_celery = Dist('Flask-Celery', VERSION, **defaults) - -bundles = [ - celery.Bundle( - 'celery-with-redis', - 'Bundle installing the dependencies for Celery and Redis', - requires=['redis>=2.4.4'], - ), - celery.Bundle( - 'celery-with-mongodb', - 'Bundle installing the dependencies for Celery and MongoDB', - requires=['pymongo'], - ), - celery.Bundle( - 'celery-with-couchdb', - 'Bundle installing the dependencies for Celery and CouchDB', - requires=['couchdb'], - ), - celery.Bundle( - 'celery-with-beanstalk', - 'Bundle installing the dependencies for Celery and Beanstalk', - requires=['beanstalkc'], - ), - - django_celery.Bundle( - 'django-celery-with-redis', - 'Bundle installing the dependencies for Django-Celery and Redis', - requires=['redis>=2.4.4'], - ), - django_celery.Bundle( - 'django-celery-with-mongodb', - 'Bundle installing the dependencies for Django-Celery and MongoDB', - requires=['pymongo'], - ), - django_celery.Bundle( - 'django-celery-with-couchdb', - 'Bundle installing the dependencies for Django-Celery and CouchDB', - requires=['couchdb'], - ), - django_celery.Bundle( - 'django-celery-with-beanstalk', - 'Bundle installing the dependencies for Django-Celery and Beanstalk', - requires=['beanstalkc'], - ), -] diff --git a/awx/lib/site-packages/celery/contrib/methods.py b/awx/lib/site-packages/celery/contrib/methods.py index 6d771bb56f..56aa7f479c 100644 --- a/awx/lib/site-packages/celery/contrib/methods.py +++ b/awx/lib/site-packages/celery/contrib/methods.py @@ -26,7 +26,7 @@ or with any task decorator: class X(object): - @celery.task(filter=task_method) + @app.task(filter=task_method) def add(self, x, y): return x + y @@ -46,9 +46,9 @@ or with any task decorator: from celery.task import task # ALSO BAD # GOOD: - celery = Celery(...) + app = Celery(...) - @celery.task(filter=task_method) + @app.task(filter=task_method) def foo(self): pass # ALSO GOOD: @@ -57,6 +57,12 @@ or with any task decorator: @current_app.task(filter=task_method) def foo(self): pass + # ALSO GOOD: + from celery import shared_task + + @shared_task(filter=task_method) + def foo(self): pass + Caveats ------- @@ -100,6 +106,8 @@ from __future__ import absolute_import from celery import current_app +__all__ = ['task_method', 'task'] + class task_method(object): diff --git a/awx/lib/site-packages/celery/contrib/migrate.py b/awx/lib/site-packages/celery/contrib/migrate.py index 76fe1db2fa..e4a10e9b97 100644 --- a/awx/lib/site-packages/celery/contrib/migrate.py +++ b/awx/lib/site-packages/celery/contrib/migrate.py @@ -6,8 +6,7 @@ Migration tools. 
""" -from __future__ import absolute_import -from __future__ import with_statement +from __future__ import absolute_import, print_function, unicode_literals import socket @@ -16,12 +15,22 @@ from itertools import cycle, islice from kombu import eventloop, Queue from kombu.common import maybe_declare -from kombu.exceptions import StdChannelError from kombu.utils.encoding import ensure_bytes from celery.app import app_or_default +from celery.five import string, string_t from celery.utils import worker_direct +__all__ = ['StopFiltering', 'State', 'republish', 'migrate_task', + 'migrate_tasks', 'move', 'task_id_eq', 'task_id_in', + 'start_filter', 'move_task_by_id', 'move_by_idmap', + 'move_by_taskmap', 'move_direct', 'move_direct_by_id'] + +MOVING_PROGRESS_FMT = """\ +Moving task {state.filtered}/{state.strtotal}: \ +{body[task]}[{body[id]}]\ +""" + class StopFiltering(Exception): pass @@ -35,13 +44,13 @@ class State(object): @property def strtotal(self): if not self.total_apx: - return u'?' - return unicode(self.total_apx) + return '?' + return string(self.total_apx) def __repr__(self): if self.filtered: - return '^%s' % self.filtered - return '%s/%s' % (self.count, self.strtotal) + return '^{0.filtered}'.format(self) + return '{0.count}/{0.strtotal}'.format(self) def republish(producer, message, exchange=None, routing_key=None, @@ -79,7 +88,7 @@ def migrate_task(producer, body_, message, queues=None): def filter_callback(callback, tasks): def filtered(body, message): - if tasks and message.payload['task'] not in tasks: + if tasks and body['task'] not in tasks: return return callback(body, message) @@ -108,7 +117,7 @@ def migrate_tasks(source, dest, migrate=migrate_task, app=None, def _maybe_queue(app, q): - if isinstance(q, basestring): + if isinstance(q, string_t): return app.amqp.queues[q] return q @@ -161,7 +170,7 @@ def move(predicate, connection=None, exchange=None, routing_key=None, .. 
code-block:: python def transform(value): - if isinstance(value, basestring): + if isinstance(value, string_t): return Queue(value, Exchange(value), value) return value @@ -220,7 +229,7 @@ def task_id_in(ids, body, message): def prepare_queues(queues): - if isinstance(queues, basestring): + if isinstance(queues, string_t): queues = queues.split(',') if isinstance(queues, list): queues = dict(tuple(islice(cycle(q.split(':')), None, 2)) @@ -233,12 +242,12 @@ def prepare_queues(queues): def start_filter(app, conn, filter, limit=None, timeout=1.0, ack_messages=False, tasks=None, queues=None, callback=None, forever=False, on_declare_queue=None, - consume_from=None, state=None, **kwargs): + consume_from=None, state=None, accept=None, **kwargs): state = state or State() queues = prepare_queues(queues) consume_from = [_maybe_queue(app, q) - for q in consume_from or queues.keys()] - if isinstance(tasks, basestring): + for q in consume_from or list(queues)] + if isinstance(tasks, string_t): tasks = set(tasks.split(',')) if tasks is None: tasks = set([]) @@ -251,7 +260,7 @@ def start_filter(app, conn, filter, limit=None, timeout=1.0, def ack_message(body, message): message.ack() - consumer = app.amqp.TaskConsumer(conn, queues=consume_from) + consumer = app.amqp.TaskConsumer(conn, queues=consume_from, accept=accept) if tasks: filter = filter_callback(filter, tasks) @@ -278,7 +287,7 @@ def start_filter(app, conn, filter, limit=None, timeout=1.0, _, mcount, _ = queue(consumer.channel).queue_declare(passive=True) if mcount: state.total_apx += mcount - except conn.channel_errors + (StdChannelError, ): + except conn.channel_errors: pass # start migrating messages. @@ -312,10 +321,10 @@ def move_by_idmap(map, **kwargs): Example:: - >>> reroute_idmap({ - ... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue(...), - ... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue(...), - ... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue(...)}, + >>> move_by_idmap({ + ... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'), + ... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'), + ... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')}, ... queues=['hipri']) """ @@ -333,9 +342,9 @@ def move_by_taskmap(map, **kwargs): Example:: - >>> reroute_idmap({ - ... 'tasks.add': Queue(...), - ... 'tasks.mul': Queue(...), + >>> move_by_taskmap({ + ... 'tasks.add': Queue('name'), + ... 'tasks.mul': Queue('name'), ... }) """ @@ -346,12 +355,11 @@ def move_by_taskmap(map, **kwargs): return move(task_name_in_map, **kwargs) +def filter_status(state, body, message, **kwargs): + print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs)) + + move_direct = partial(move, transform=worker_direct) move_direct_by_id = partial(move_task_by_id, transform=worker_direct) move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct) move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct) - - -def filter_status(state, body, message): - print('Moving task %s/%s: %s[%s]' % ( - state.filtered, state.strtotal, body['task'], body['id'])) diff --git a/awx/lib/site-packages/celery/contrib/rdb.py b/awx/lib/site-packages/celery/contrib/rdb.py index 00914ab04b..3e9f55bbaa 100644 --- a/awx/lib/site-packages/celery/contrib/rdb.py +++ b/awx/lib/site-packages/celery/contrib/rdb.py @@ -34,8 +34,7 @@ Inspired by http://snippets.dzone.com/posts/show/7248 base port. The selected port will be logged by the worker. 
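In practice the remote debugger above is reached with a single call from inside a task, with host and port taken from the environment. An illustrative setup (the task, broker URL and port values are examples, not part of the patch)::

    from celery import Celery
    from celery.contrib import rdb

    app = Celery('tasks', broker='amqp://')  # assumed broker URL

    @app.task
    def add(x, y):
        result = x + y
        rdb.set_trace()  # worker log shows the chosen port; telnet in
        return result

    # $ CELERY_RDB_HOST=127.0.0.1 CELERY_RDB_PORT=6900 celery worker -A tasks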
""" -from __future__ import absolute_import -from __future__ import with_statement +from __future__ import absolute_import, print_function import errno import os @@ -46,8 +45,12 @@ from pdb import Pdb from billiard import current_process +from celery.five import range from celery.platforms import ignore_errno +__all__ = ['CELERY_RDB_HOST', 'CELERY_RDB_PORT', 'default_port', + 'Rdb', 'debugger', 'set_trace'] + default_port = 6899 CELERY_RDB_HOST = os.environ.get('CELERY_RDB_HOST') or '127.0.0.1' @@ -58,6 +61,23 @@ _current = [None] _frame = getattr(sys, '_getframe') +NO_AVAILABLE_PORT = """\ +{self.ident}: Couldn't find an available port. + +Please specify one using the CELERY_RDB_PORT environment variable. +""" + +BANNER = """\ +{self.ident}: Please telnet into {self.host} {self.port}. + +Type `exit` in session to continue. + +{self.ident}: Waiting for client... +""" + +SESSION_STARTED = '{self.ident}: Now in session with {self.remote_addr}.' +SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.' + class Rdb(Pdb): me = 'Remote Debugger' @@ -76,16 +96,15 @@ class Rdb(Pdb): ) self._sock.setblocking(1) self._sock.listen(1) - me = '%s:%s' % (self.me, this_port) - context = self.context = {'me': me, 'host': host, 'port': this_port} - self.say('%(me)s: Please telnet %(host)s %(port)s.' - ' Type `exit` in session to continue.' % context) - self.say('%(me)s: Waiting for client...' % context) + self.ident = '{0}:{1}'.format(self.me, this_port) + self.host = host + self.port = this_port + self.say(BANNER.format(self=self)) self._client, address = self._sock.accept() self._client.setblocking(1) - context['remote_addr'] = ':'.join(str(v) for v in address) - self.say('%(me)s: In session with %(remote_addr)s' % context) + self.remote_addr = ':'.join(str(v) for v in address) + self.say(SESSION_STARTED.format(self=self)) self._handle = sys.stdin = sys.stdout = self._client.makefile('rw') Pdb.__init__(self, completekey='tab', stdin=self._handle, stdout=self._handle) @@ -97,24 +116,22 @@ class Rdb(Pdb): except ValueError: pass this_port = None - for i in xrange(search_limit): + for i in range(search_limit): _sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) this_port = port + skew + i try: _sock.bind((host, this_port)) - except socket.error, exc: + except socket.error as exc: if exc.errno in [errno.EADDRINUSE, errno.EINVAL]: continue raise else: return _sock, this_port else: - raise Exception( - '%s: Could not find available port. Please set using ' - 'environment variable CELERY_RDB_PORT' % (self.me, )) + raise Exception(NO_AVAILABLE_PORT.format(self=self)) def say(self, m): - self.out.write(m + '\n') + print(m, file=self.out) def _close_session(self): self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles @@ -122,7 +139,7 @@ class Rdb(Pdb): self._client.close() self._sock.close() self.active = False - self.say('%(me)s: Session %(remote_addr)s ended.' 
% self.context) + self.say(SESSION_ENDED.format(self=self)) def do_continue(self, arg): self._close_session() @@ -148,7 +165,7 @@ class Rdb(Pdb): def debugger(): - """Returns the current debugger instance (if any), + """Return the current debugger instance (if any), or creates a new one.""" rdb = _current[0] if rdb is None or not rdb.active: diff --git a/awx/lib/site-packages/celery/datastructures.py b/awx/lib/site-packages/celery/datastructures.py index 67f6c66b39..03d6cd1664 100644 --- a/awx/lib/site-packages/celery/datastructures.py +++ b/awx/lib/site-packages/celery/datastructures.py @@ -6,26 +6,135 @@ Custom types and data structures. """ -from __future__ import absolute_import -from __future__ import with_statement +from __future__ import absolute_import, print_function, unicode_literals import sys import time -from collections import defaultdict +from collections import defaultdict, Mapping, MutableMapping, MutableSet from heapq import heapify, heappush, heappop +from functools import partial from itertools import chain -try: - from collections import Mapping, MutableMapping -except ImportError: # pragma: no cover - MutableMapping = None # noqa - Mapping = dict # noqa - from billiard.einfo import ExceptionInfo # noqa +from kombu.utils.encoding import safe_str from kombu.utils.limits import TokenBucket # noqa -from .utils.functional import LRUCache, first, uniq # noqa +from celery.five import items +from celery.utils.functional import LRUCache, first, uniq # noqa + +try: + from django.utils.functional import LazyObject +except ImportError: + class LazyObject(object): # noqa + pass + +DOT_HEAD = """ +{IN}{type} {id} {{ +{INp}graph [{attrs}] +""" +DOT_ATTR = '{name}={value}' +DOT_NODE = '{INp}"{0}" [{attrs}]' +DOT_EDGE = '{INp}"{0}" {dir} "{1}" [{attrs}]' +DOT_ATTRSEP = ', ' +DOT_DIRS = {'graph': '--', 'digraph': '->'} +DOT_TAIL = '{IN}}}' + +__all__ = ['GraphFormatter', 'CycleError', 'DependencyGraph', + 'AttributeDictMixin', 'AttributeDict', 'DictAttribute', + 'ConfigurationView', 'LimitedSet'] + + +def force_mapping(m): + if isinstance(m, LazyObject): + m = m._wrapped + return DictAttribute(m) if not isinstance(m, Mapping) else m + + +class GraphFormatter(object): + _attr = DOT_ATTR.strip() + _node = DOT_NODE.strip() + _edge = DOT_EDGE.strip() + _head = DOT_HEAD.strip() + _tail = DOT_TAIL.strip() + _attrsep = DOT_ATTRSEP + _dirs = dict(DOT_DIRS) + + scheme = { + 'shape': 'box', + 'arrowhead': 'vee', + 'style': 'filled', + 'fontname': 'HelveticaNeue', + } + edge_scheme = { + 'color': 'darkseagreen4', + 'arrowcolor': 'black', + 'arrowsize': 0.7, + } + node_scheme = {'fillcolor': 'palegreen3', 'color': 'palegreen4'} + term_scheme = {'fillcolor': 'palegreen1', 'color': 'palegreen2'} + graph_scheme = {'bgcolor': 'mintcream'} + + def __init__(self, root=None, type=None, id=None, + indent=0, inw=' ' * 4, **scheme): + self.id = id or 'dependencies' + self.root = root + self.type = type or 'digraph' + self.direction = self._dirs[self.type] + self.IN = inw * (indent or 0) + self.INp = self.IN + inw + self.scheme = dict(self.scheme, **scheme) + self.graph_scheme = dict(self.graph_scheme, root=self.label(self.root)) + + def attr(self, name, value): + value = '"{0}"'.format(value) + return self.FMT(self._attr, name=name, value=value) + + def attrs(self, d, scheme=None): + d = dict(self.scheme, **dict(scheme, **d or {}) if scheme else d) + return self._attrsep.join( + safe_str(self.attr(k, v)) for k, v in items(d) + ) + + def head(self, **attrs): + return self.FMT( + self._head, id=self.id, 
type=self.type, + attrs=self.attrs(attrs, self.graph_scheme), + ) + + def tail(self): + return self.FMT(self._tail) + + def label(self, obj): + return obj + + def node(self, obj, **attrs): + return self.draw_node(obj, self.node_scheme, attrs) + + def terminal_node(self, obj, **attrs): + return self.draw_node(obj, self.term_scheme, attrs) + + def edge(self, a, b, **attrs): + return self.draw_edge(a, b, **attrs) + + def _enc(self, s): + return s.encode('utf-8', 'ignore') + + def FMT(self, fmt, *args, **kwargs): + return self._enc(fmt.format( + *args, **dict(kwargs, IN=self.IN, INp=self.INp) + )) + + def draw_edge(self, a, b, scheme=None, attrs=None): + return self.FMT( + self._edge, self.label(a), self.label(b), + dir=self.direction, attrs=self.attrs(attrs, self.edge_scheme), + ) + + def draw_node(self, obj, scheme=None, attrs=None): + return self.FMT( + self._node, self.label(obj), attrs=self.attrs(attrs, scheme), + ) class CycleError(Exception): @@ -47,7 +156,8 @@ class DependencyGraph(object): """ - def __init__(self, it=None): + def __init__(self, it=None, formatter=None): + self.formatter = formatter or GraphFormatter() self.adjacent = {} if it is not None: self.update(it) @@ -61,6 +171,10 @@ class DependencyGraph(object): (``A`` depends on ``B``).""" self[A].append(B) + def connect(self, graph): + """Add nodes from another graph.""" + self.adjacent.update(graph.adjacent) + def topsort(self): """Sort the graph topologically. @@ -85,7 +199,7 @@ class DependencyGraph(object): return [t[0] for t in graph._khan62()] def valency_of(self, obj): - """Returns the velency (degree) of a vertex in the graph.""" + """Return the valency (degree) of a vertex in the graph.""" try: l = [len(self[obj])] except KeyError: @@ -105,8 +219,8 @@ class DependencyGraph(object): self.add_edge(obj, dep) def edges(self): - """Returns generator that yields for all edges in the graph.""" - return (obj for obj, adj in self.iteritems() if adj) + """Return generator that yields for all edges in the graph.""" + return (obj for obj, adj in items(self) if adj) def _khan62(self): """Khans simple topological sort algorithm from '62 @@ -165,19 +279,32 @@ class DependencyGraph(object): return result - def to_dot(self, fh, ws=' ' * 4): + def to_dot(self, fh, formatter=None): """Convert the graph to DOT format. :param fh: A file, or a file-like object to write the graph to. 
""" - fh.write('digraph dependencies {\n') - for obj, adjacent in self.iteritems(): + seen = set() + draw = formatter or self.formatter + P = partial(print, file=fh) + + def if_not_seen(fun, obj): + if draw.label(obj) not in seen: + P(fun(obj)) + seen.add(draw.label(obj)) + + P(draw.head()) + for obj, adjacent in items(self): if not adjacent: - fh.write(ws + '"%s"\n' % (obj, )) + if_not_seen(draw.terminal_node, obj) for req in adjacent: - fh.write(ws + '"%s" -> "%s"\n' % (obj, req)) - fh.write('}\n') + if_not_seen(draw.node, obj) + P(draw.edge(obj, req)) + P(draw.tail()) + + def format(self, obj): + return self.formatter(obj) if self.formatter else obj def __iter__(self): return iter(self.adjacent) @@ -192,26 +319,26 @@ class DependencyGraph(object): return obj in self.adjacent def _iterate_items(self): - return self.adjacent.iteritems() + return items(self.adjacent) items = iteritems = _iterate_items def __repr__(self): return '\n'.join(self.repr_node(N) for N in self) - def repr_node(self, obj, level=1): - output = ['%s(%s)' % (obj, self.valency_of(obj))] + def repr_node(self, obj, level=1, fmt='{0}({1})'): + output = [fmt.format(obj, self.valency_of(obj))] if obj in self: for other in self[obj]: - d = '%s(%s)' % (other, self.valency_of(other)) + d = fmt.format(other, self.valency_of(other)) output.append(' ' * level + d) output.extend(self.repr_node(other, level + 1).split('\n')[1:]) return '\n'.join(output) class AttributeDictMixin(object): - """Adds attribute access to mappings. + """Augment classes with a Mapping interface by adding attribute access. - `d.key -> d[key]` + I.e. `d.key -> d[key]`. """ @@ -221,7 +348,8 @@ class AttributeDictMixin(object): return self[k] except KeyError: raise AttributeError( - "'%s' object has no attribute '%s'" % (type(self).__name__, k)) + '{0!r} object has no attribute {1!r}'.format( + type(self).__name__, k)) def __setattr__(self, key, value): """`d[key] = value -> d.key = value`""" @@ -237,6 +365,7 @@ class DictAttribute(object): """Dict interface to attributes. `obj[k] -> obj.k` + `obj[k] = val -> obj.k = val` """ obj = None @@ -287,9 +416,15 @@ class DictAttribute(object): yield key, getattr(self.obj, key) iteritems = _iterate_items + def _iterate_values(self): + for key in self._iterate_keys(): + yield getattr(self.obj, key) + itervalues = _iterate_values + if sys.version_info[0] == 3: # pragma: no cover items = _iterate_items keys = _iterate_keys + values = _iterate_values else: def keys(self): @@ -298,10 +433,16 @@ class DictAttribute(object): def items(self): return list(self._iterate_items()) + def values(self): + return list(self._iterate_values()) +MutableMapping.register(DictAttribute) + class ConfigurationView(AttributeDictMixin): """A view over an applications configuration dicts. + Custom (but older) version of :class:`collections.ChainMap`. + If the key does not exist in ``changes``, the ``defaults`` dicts are consulted. 
@@ -318,8 +459,7 @@ class ConfigurationView(AttributeDictMixin):
                              _order=[changes] + defaults)
 
     def add_defaults(self, d):
-        if not isinstance(d, Mapping):
-            d = DictAttribute(d)
+        d = force_mapping(d)
         self.defaults.insert(0, d)
         self._order.insert(1, d)
 
@@ -343,6 +483,10 @@ class ConfigurationView(AttributeDictMixin):
         except KeyError:
             return default
 
+    def clear(self):
+        """Remove all changes, but keep defaults."""
+        self.changes.clear()
+
     def setdefault(self, key, default):
         try:
             return self[key]
@@ -354,13 +498,14 @@ class ConfigurationView(AttributeDictMixin):
         return self.changes.update(*args, **kwargs)
 
     def __contains__(self, key):
-        for d in self._order:
-            if key in d:
-                return True
-        return False
+        return any(key in m for m in self._order)
+
+    def __bool__(self):
+        return any(self._order)
+    __nonzero__ = __bool__  # Py2
 
     def __repr__(self):
-        return repr(dict(self.iteritems()))
+        return repr(dict(items(self)))
 
     def __iter__(self):
         return self._iterate_keys()
@@ -368,7 +513,7 @@ class ConfigurationView(AttributeDictMixin):
     def __len__(self):
         # The logic for iterating keys includes uniq(),
         # so to be safe we count by explicitly iterating
-        return len(self.keys())
+        return len(set().union(*self._order))
 
     def _iter(self, op):
         # defaults must be first in the stream, so values in
@@ -387,57 +532,62 @@ class ConfigurationView(AttributeDictMixin):
         return (self[key] for key in self)
     itervalues = _iterate_values
 
-    def keys(self):
-        return list(self._iterate_keys())
+    if sys.version_info[0] == 3:  # pragma: no cover
+        keys = _iterate_keys
+        items = _iterate_items
+        values = _iterate_values
 
-    def items(self):
-        return list(self._iterate_items())
+    else:  # noqa
+        def keys(self):
+            return list(self._iterate_keys())
 
-    def values(self):
-        return list(self._iterate_values())
-if MutableMapping:
-    MutableMapping.register(ConfigurationView)
+        def items(self):
+            return list(self._iterate_items())
+
+        def values(self):
+            return list(self._iterate_values())
+
+MutableMapping.register(ConfigurationView)
 
 
 class LimitedSet(object):
     """Kind-of Set with limitations.
 
     Good for when you need to test for membership (`a in set`),
-    but the list might become to big, so you want to limit it so it doesn't
-    consume too much resources.
+    but the list might become too big.
 
     :keyword maxlen: Maximum number of members before we start
                      evicting expired members.
     :keyword expires: Time in seconds, before a membership expires.
 
     """
-    __slots__ = ('maxlen', 'expires', '_data', '__len__', '_heap')
 
     def __init__(self, maxlen=None, expires=None, data=None, heap=None):
         self.maxlen = maxlen
         self.expires = expires
-        self._data = data or {}
-        self._heap = heap or []
-        self.__len__ = self._data.__len__
+        self._data = {} if data is None else data
+        self._heap = [] if heap is None else heap
+        # make shortcuts
+        self.__len__ = self._heap.__len__
+        self.__iter__ = self._heap.__iter__
+        self.__contains__ = self._data.__contains__
 
-    def add(self, value):
+    def add(self, value, now=time.time):
         """Add a new member."""
-        self.purge(1)
-        now = time.time()
-        self._data[value] = now
-        heappush(self._heap, (now, value))
-
-    def __reduce__(self):
-        return self.__class__, (
-            self.maxlen, self.expires, self._data, self._heap,
-        )
+        # offset is there to modify the length of the list,
+        # this way we can expire an item before inserting the value,
+        # and it will end up in correct order.
+ self.purge(1, offset=1) + inserted = now() + self._data[value] = inserted + heappush(self._heap, (inserted, value)) def clear(self): """Remove all members""" self._data.clear() self._heap[:] = [] - def pop_value(self, value): + def discard(self, value): """Remove membership by finding value.""" try: itime = self._data[value] @@ -448,28 +598,29 @@ class LimitedSet(object): except ValueError: pass self._data.pop(value, None) + pop_value = discard # XXX compat - def _expire_item(self): - """Hunt down and remove an expired item.""" - self.purge(1) - - def __contains__(self, value): - return value in self._data - - def purge(self, limit=None): + def purge(self, limit=None, offset=0, now=time.time): + """Purge expired items.""" H, maxlen = self._heap, self.maxlen if not maxlen: return + + # If the data/heap gets corrupted and limit is None + # this will go into an infinite loop, so limit must + # have a value to guard the loop. + limit = len(self) + offset if limit is None else limit + i = 0 - while len(self) >= maxlen: - if limit and i > limit: + while len(self) + offset > maxlen: + if i >= limit: break try: item = heappop(H) except IndexError: break if self.expires: - if time.time() < item[0] + self.expires: + if now() < item[0] + self.expires: heappush(H, item) break try: @@ -479,7 +630,7 @@ class LimitedSet(object): i += 1 def update(self, other, heappush=heappush): - if isinstance(other, self.__class__): + if isinstance(other, LimitedSet): self._data.update(other._data) self._heap.extend(other._heap) heapify(self._heap) @@ -490,17 +641,26 @@ class LimitedSet(object): def as_dict(self): return self._data - def __iter__(self): - return iter(self._data) + def __eq__(self, other): + return self._heap == other._heap + + def __ne__(self, other): + return not self.__eq__(other) def __repr__(self): - return 'LimitedSet(%s)' % (repr(list(self._data))[:100], ) + return 'LimitedSet({0})'.format(len(self)) - @property - def chronologically(self): - return [value for _, value in self._heap] + def __iter__(self): + return (item[1] for item in self._heap) - @property - def first(self): - """Get the oldest member.""" - return self._heap[0][1] + def __len__(self): + return len(self._heap) + + def __contains__(self, key): + return key in self._data + + def __reduce__(self): + return self.__class__, ( + self.maxlen, self.expires, self._data, self._heap, + ) +MutableSet.register(LimitedSet) diff --git a/awx/lib/site-packages/celery/events/__init__.py b/awx/lib/site-packages/celery/events/__init__.py index 9d053472e1..1b8407f23e 100644 --- a/awx/lib/site-packages/celery/events/__init__.py +++ b/awx/lib/site-packages/celery/events/__init__.py @@ -9,24 +9,42 @@ """ from __future__ import absolute_import -from __future__ import with_statement +import os import time import socket import threading +import warnings from collections import deque from contextlib import contextmanager from copy import copy +from operator import itemgetter -from kombu import eventloop, Exchange, Queue, Consumer, Producer +from kombu import Exchange, Queue, Producer +from kombu.connection import maybe_channel +from kombu.mixins import ConsumerMixin from kombu.utils import cached_property from celery.app import app_or_default from celery.utils import uuid +from celery.utils.functional import dictfilter +from celery.utils.timeutils import adjust_timestamp, utcoffset, maybe_s_to_ms + +__all__ = ['Events', 'Event', 'EventDispatcher', 'EventReceiver'] event_exchange = Exchange('celeryev', type='topic') +_TZGETTER = itemgetter('utcoffset', 
'timestamp')
+
+W_YAJL = """
+anyjson is currently using the yajl library.
+This json implementation is broken: it severely truncates floats,
+so timestamps will not work.
+
+Please uninstall yajl or force anyjson to use a different library.
+"""
+

 def get_exchange(conn):
     ex = copy(event_exchange)
@@ -36,26 +54,49 @@ def get_exchange(conn):
     return ex
 
 
-def Event(type, _fields=None, **fields):
+def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields):
     """Create an event.
 
     An event is a dictionary, the only required field is ``type``.
+    A ``timestamp`` field will be set to the current time if not provided.
 
     """
-    event = dict(_fields or {}, type=type, **fields)
+    event = __dict__(_fields, **fields) if _fields else fields
     if 'timestamp' not in event:
-        event['timestamp'] = time.time()
+        event.update(timestamp=__now__(), type=type)
+    else:
+        event['type'] = type
     return event
 
 
+def group_from(type):
+    """Get the group part of an event type name.
+
+    E.g.::
+
+        >>> group_from('task-sent')
+        'task'
+
+        >>> group_from('custom-my-event')
+        'custom'
+
+    """
+    return type.split('-', 1)[0]
+
+
 class EventDispatcher(object):
-    """Send events as messages.
+    """Dispatches event messages.
 
     :param connection: Connection to the broker.
 
     :keyword hostname: Hostname to identify ourselves as,
        by default uses the hostname returned by :func:`socket.gethostname`.
 
+    :keyword groups: List of groups to send events for.  :meth:`send` will
+        ignore send requests to groups not in this list.
+        If this is :const:`None`, all events will be sent. Example groups
+        include ``"task"`` and ``"worker"``.
+
     :keyword enabled: Set to :const:`False` to not actually publish any
        events, making :meth:`send` a noop operation.
 
@@ -71,9 +112,17 @@ class EventDispatcher(object):
 
     """
     DISABLED_TRANSPORTS = set(['sql'])
 
+    app = None
+
+    # set of callbacks to be called when :meth:`enabled`.
+    on_enabled = None
+
+    # set of callbacks to be called when :meth:`disabled`.
+    on_disabled = None
+
     def __init__(self, connection=None, hostname=None, enabled=True,
                  channel=None, buffer_while_offline=True, app=None,
-                 serializer=None):
+                 serializer=None, groups=None):
         self.app = app_or_default(app or self.app)
         self.connection = connection
         self.channel = channel
@@ -85,8 +134,9 @@ class EventDispatcher(object):
         self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER
         self.on_enabled = set()
         self.on_disabled = set()
-
-        self.enabled = enabled
+        self.groups = set(groups or [])
+        self.tzoffset = [-time.timezone, -time.altzone]
+        self.clock = self.app.clock
         if not connection and channel:
             self.connection = channel.connection.client
         self.enabled = enabled
@@ -96,6 +146,14 @@ class EventDispatcher(object):
             self.enabled = False
         if self.enabled:
             self.enable()
+        self.headers = {'hostname': self.hostname}
+        self.pid = os.getpid()
+        self.warn_if_yajl()
+
+    def warn_if_yajl(self):
+        import anyjson
+        if anyjson.implementation.name == 'yajl':
+            warnings.warn(UserWarning(W_YAJL))
 
     def __enter__(self):
         return self
@@ -118,10 +176,31 @@ class EventDispatcher(object):
             for callback in self.on_disabled:
                 callback()
 
-    def publish(self, type, fields, producer, retry=False, retry_policy=None):
+    def publish(self, type, fields, producer, retry=False,
+                retry_policy=None, blind=False, utcoffset=utcoffset,
+                Event=Event):
+        """Publish event using a custom :class:`~kombu.Producer`
+        instance.
+
+        :param type: Event type name, with group separated by dash (`-`).
+        :param fields: Dictionary of event fields, must be json serializable.
+ :param producer: :class:`~kombu.Producer` instance to use, + only the ``publish`` method will be called. + :keyword retry: Retry in the event of connection failure. + :keyword retry_policy: Dict of custom retry policy, see + :meth:`~kombu.Connection.ensure`. + :keyword blind: Don't set logical clock value (also do not forward + the internal logical clock). + :keyword Event: Event type used to create event, + defaults to :func:`Event`. + :keyword utcoffset: Function returning the current utcoffset in hours. + + """ + with self.mutex: - event = Event(type, hostname=self.hostname, - clock=self.app.clock.forward(), **fields) + clock = None if blind else self.clock.forward() + event = Event(type, hostname=self.hostname, utcoffset=utcoffset(), + pid=self.pid, clock=clock, **fields) exchange = self.exchange producer.publish( event, @@ -131,24 +210,37 @@ class EventDispatcher(object): retry_policy=retry_policy, declare=[exchange], serializer=self.serializer, + headers=self.headers, ) - def send(self, type, **fields): + def send(self, type, blind=False, **fields): """Send event. - :param type: Kind of event. - :keyword \*\*fields: Event arguments. + :param type: Event type name, with group separated by dash (`-`). + :keyword retry: Retry in the event of connection failure. + :keyword retry_policy: Dict of custom retry policy, see + :meth:`~kombu.Connection.ensure`. + :keyword blind: Don't set logical clock value (also do not forward + the internal logical clock). + :keyword Event: Event type used to create event, + defaults to :func:`Event`. + :keyword utcoffset: Function returning the current utcoffset in hours. + :keyword \*\*fields: Event fields, must be json serializable. """ if self.enabled: + groups = self.groups + if groups and group_from(type) not in groups: + return try: - self.publish(type, fields, self.producer) - except Exception, exc: + self.publish(type, fields, self.producer, blind) + except Exception as exc: if not self.buffer_while_offline: raise self._outbound_buffer.append((type, fields, exc)) def flush(self): + """Flushes the outbound buffer.""" while self._outbound_buffer: try: type, fields, _ = self._outbound_buffer.popleft() @@ -156,8 +248,9 @@ class EventDispatcher(object): return self.send(type, **fields) - def copy_buffer(self, other): - self._outbound_buffer = other._outbound_buffer + def extend_buffer(self, other): + """Copies the outbound buffer of another instance.""" + self._outbound_buffer.extend(other._outbound_buffer) def close(self): """Close the event dispatcher.""" @@ -172,7 +265,7 @@ class EventDispatcher(object): publisher = property(_get_publisher, _set_publisher) # XXX compat -class EventReceiver(object): +class EventReceiver(ConsumerMixin): """Capture events. :param connection: Connection to the broker. @@ -183,14 +276,13 @@ class EventReceiver(object): handler. 
""" - handlers = {} + app = None - def __init__(self, connection, handlers=None, routing_key='#', + def __init__(self, channel, handlers=None, routing_key='#', node_id=None, app=None, queue_prefix='celeryev'): - self.app = app_or_default(app) - self.connection = connection - if handlers is not None: - self.handlers = handlers + self.app = app_or_default(app or self.app) + self.channel = maybe_channel(channel) + self.handlers = {} if handlers is None else handlers self.routing_key = routing_key self.node_id = node_id or uuid() self.queue_prefix = queue_prefix @@ -199,7 +291,16 @@ class EventReceiver(object): exchange=self.exchange, routing_key=self.routing_key, auto_delete=True, - durable=False) + durable=False, + queue_arguments=self._get_queue_arguments()) + self.adjust_clock = self.app.clock.adjust + + def _get_queue_arguments(self): + conf = self.app.conf + return dictfilter({ + 'x-message-ttl': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_TTL), + 'x-expires': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_EXPIRES), + }) def process(self, type, event): """Process the received event by dispatching it to the appropriate @@ -207,29 +308,18 @@ class EventReceiver(object): handler = self.handlers.get(type) or self.handlers.get('*') handler and handler(event) - @contextmanager - def consumer(self, wakeup=True): - """Create event consumer.""" - consumer = Consumer(self.connection, - queues=[self.queue], no_ack=True, - accept=['application/json']) - consumer.register_callback(self._receive) - consumer.consume() + def get_consumers(self, Consumer, channel): + return [Consumer(queues=[self.queue], + callbacks=[self._receive], no_ack=True, + accept=['application/json'])] - try: - if wakeup: - self.wakeup_workers(channel=consumer.channel) - yield consumer - finally: - try: - consumer.cancel() - except self.connection.connection_errors: - pass + def on_consume_ready(self, connection, channel, consumers, + wakeup=True, **kwargs): + if wakeup: + self.wakeup_workers(channel=channel) def itercapture(self, limit=None, timeout=None, wakeup=True): - with self.consumer(wakeup=wakeup) as consumer: - yield consumer - self.drain_events(limit=limit, timeout=timeout) + return self.consume(limit=limit, timeout=timeout, wakeup=wakeup) def capture(self, limit=None, timeout=None, wakeup=True): """Open up a consumer capturing events. @@ -238,23 +328,36 @@ class EventReceiver(object): stop unless forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`. 
""" - list(self.itercapture(limit=limit, timeout=timeout, wakeup=wakeup)) + return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup)) def wakeup_workers(self, channel=None): self.app.control.broadcast('heartbeat', connection=self.connection, channel=channel) - def drain_events(self, **kwargs): - for _ in eventloop(self.connection, **kwargs): - pass - - def _receive(self, body, message): - type = body.pop('type').lower() + def event_from_message(self, body, localize=True, + now=time.time, tzfields=_TZGETTER, + adjust_timestamp=adjust_timestamp): + type = body.get('type', '').lower() clock = body.get('clock') if clock: - self.app.clock.adjust(clock) - self.process(type, Event(type, body)) + self.adjust_clock(clock) + + if localize: + try: + offset, timestamp = tzfields(body) + except KeyError: + pass + else: + body['timestamp'] = adjust_timestamp(timestamp, offset) + return type, Event(type, body, local_received=now()) + + def _receive(self, body, message): + self.process(*self.event_from_message(body)) + + @property + def connection(self): + return self.channel.connection.client if self.channel else None class Events(object): @@ -280,7 +383,7 @@ class Events(object): @contextmanager def default_dispatcher(self, hostname=None, enabled=True, buffer_while_offline=False): - with self.app.amqp.producer_pool.acquire(block=True) as pub: - with self.Dispatcher(pub.connection, hostname, enabled, - pub.channel, buffer_while_offline) as d: + with self.app.amqp.producer_pool.acquire(block=True) as prod: + with self.Dispatcher(prod.connection, hostname, enabled, + prod.channel, buffer_while_offline) as d: yield d diff --git a/awx/lib/site-packages/celery/events/cursesmon.py b/awx/lib/site-packages/celery/events/cursesmon.py index 179c626a5d..796565fc49 100644 --- a/awx/lib/site-packages/celery/events/cursesmon.py +++ b/awx/lib/site-packages/celery/events/cursesmon.py @@ -6,24 +6,26 @@ Graphical monitor of Celery events using curses. """ -from __future__ import absolute_import -from __future__ import with_statement +from __future__ import absolute_import, print_function import curses import sys import threading -import time from datetime import datetime from itertools import count from textwrap import wrap +from time import time from math import ceil from celery import VERSION_BANNER from celery import states from celery.app import app_or_default +from celery.five import items, values from celery.utils.text import abbr, abbrtask +__all__ = ['CursesMonitor', 'evtop'] + BORDER_SPACING = 4 LEFT_BORDER_OFFSET = 3 UUID_WIDTH = 36 @@ -35,6 +37,10 @@ MIN_TASK_WIDTH = 16 # this module is considered experimental # we don't care about coverage. 
+STATUS_SCREEN = """\ +events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all} +""" + class CursesMonitor(object): # pragma: no cover keymap = {} @@ -48,12 +54,12 @@ class CursesMonitor(object): # pragma: no cover background = curses.COLOR_WHITE online_str = 'Workers online: ' help_title = 'Keys: ' - help = ('j:up k:down i:info t:traceback r:result c:revoke ^c: quit') - greet = 'celeryev %s' % VERSION_BANNER + help = ('j:down k:up i:info t:traceback r:result c:revoke ^c: quit') + greet = 'celery events {0}'.format(VERSION_BANNER) info_str = 'Info: ' - def __init__(self, state, keymap=None, app=None): - self.app = app_or_default(app) + def __init__(self, state, app, keymap=None): + self.app = app self.keymap = keymap or self.keymap self.state = state default_keymap = {'J': self.move_selection_down, @@ -64,6 +70,7 @@ class CursesMonitor(object): # pragma: no cover 'I': self.selection_info, 'L': self.selection_rate_limit} self.keymap = dict(default_keymap, **self.keymap) + self.lock = threading.RLock() def format_row(self, uuid, task, worker, timestamp, state): mx = self.display_width @@ -87,7 +94,8 @@ class CursesMonitor(object): # pragma: no cover state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH) timestamp = timestamp.ljust(TIMESTAMP_WIDTH) - row = '%s %s %s %s %s ' % (uuid, worker, task, timestamp, state) + row = '{0} {1} {2} {3} {4} '.format(uuid, worker, task, + timestamp, state) if self.screen_width is None: self.screen_width = len(row[:mx]) return row[:mx] @@ -156,11 +164,12 @@ class CursesMonitor(object): # pragma: no cover def alert(self, callback, title=None): self.win.erase() my, mx = self.win.getmaxyx() - y = blank_line = count(2).next + y = blank_line = count(2) if title: - self.win.addstr(y(), 3, title, curses.A_BOLD | curses.A_UNDERLINE) - blank_line() - callback(my, mx, y()) + self.win.addstr(next(y), 3, title, + curses.A_BOLD | curses.A_UNDERLINE) + next(blank_line) + callback(my, mx, next(y)) self.win.addstr(my - 1, 0, 'Press any key to continue...', curses.A_BOLD) self.win.refresh() @@ -191,19 +200,19 @@ class CursesMonitor(object): # pragma: no cover def alert_remote_control_reply(self, reply): def callback(my, mx, xs): - y = count(xs).next + y = count(xs) if not reply: self.win.addstr( - y(), 3, 'No replies received in 1s deadline.', + next(y), 3, 'No replies received in 1s deadline.', curses.A_BOLD + curses.color_pair(2), ) return for subreply in reply: - curline = y() + curline = next(y) - host, response = subreply.items()[0] - host = '%s: ' % host + host, response = next(items(subreply)) + host = '{0}: '.format(host) self.win.addstr(curline, 3, host, curses.A_BOLD) attr = curses.A_NORMAL text = '' @@ -248,16 +257,18 @@ class CursesMonitor(object): # pragma: no cover def alert_callback(mx, my, xs): my, mx = self.win.getmaxyx() - y = count(xs).next + y = count(xs) task = self.state.tasks[self.selected_task] info = task.info(extra=['state']) - infoitems = [('args', info.pop('args', None)), - ('kwargs', info.pop('kwargs', None))] + info.items() + infoitems = [ + ('args', info.pop('args', None)), + ('kwargs', info.pop('kwargs', None)) + ] + list(info.items()) for key, value in infoitems: if key is None: continue value = str(value) - curline = y() + curline = next(y) keys = key + ': ' self.win.addstr(curline, 3, keys, curses.A_BOLD) wrapped = wrap(value, mx - 2) @@ -268,7 +279,7 @@ class CursesMonitor(object): # pragma: no cover self.screen_width - (len(keys) + 3))) else: for subline in wrapped: - nexty = y() + nexty = next(y) if nexty >= my - 1: subline = ' 
' * 4 + '[...]' elif nexty >= my: @@ -280,7 +291,7 @@ class CursesMonitor(object): # pragma: no cover ) return self.alert( - alert_callback, 'Task details for %s' % self.selected_task, + alert_callback, 'Task details for {0.selected_task}'.format(self), ) def selection_traceback(self): @@ -291,13 +302,13 @@ class CursesMonitor(object): # pragma: no cover return curses.beep() def alert_callback(my, mx, xs): - y = count(xs).next + y = count(xs) for line in task.traceback.split('\n'): - self.win.addstr(y(), 3, line) + self.win.addstr(next(y), 3, line) return self.alert( alert_callback, - 'Task Exception Traceback for %s' % self.selected_task, + 'Task Exception Traceback for {0.selected_task}'.format(self), ) def selection_result(self): @@ -305,15 +316,16 @@ class CursesMonitor(object): # pragma: no cover return def alert_callback(my, mx, xs): - y = count(xs).next + y = count(xs) task = self.state.tasks[self.selected_task] result = (getattr(task, 'result', None) or getattr(task, 'exception', None)) for line in wrap(result, mx - 2): - self.win.addstr(y(), 3, line) + self.win.addstr(next(y), 3, line) return self.alert( - alert_callback, 'Task Result for %s' % self.selected_task, + alert_callback, + 'Task Result for {0.selected_task}'.format(self), ) def display_task_row(self, lineno, task): @@ -322,7 +334,7 @@ class CursesMonitor(object): # pragma: no cover if task.uuid == self.selected_task: attr = curses.A_STANDOUT timestamp = datetime.utcfromtimestamp( - task.timestamp or time.time(), + task.timestamp or time(), ) timef = timestamp.strftime('%H:%M:%S') hostname = task.worker.hostname if task.worker else '*NONE*' @@ -337,88 +349,92 @@ class CursesMonitor(object): # pragma: no cover task.state, state_color | attr) def draw(self): - win = self.win - self.handle_keypress() - x = LEFT_BORDER_OFFSET - y = blank_line = count(2).next - my, mx = win.getmaxyx() - win.erase() - win.bkgd(' ', curses.color_pair(1)) - win.border() - win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5)) - blank_line() - win.addstr(y(), x, self.format_row('UUID', 'TASK', - 'WORKER', 'TIME', 'STATE'), - curses.A_BOLD | curses.A_UNDERLINE) - tasks = self.tasks - if tasks: - for row, (uuid, task) in enumerate(tasks): - if row > self.display_height: - break + with self.lock: + win = self.win + self.handle_keypress() + x = LEFT_BORDER_OFFSET + y = blank_line = count(2) + my, mx = win.getmaxyx() + win.erase() + win.bkgd(' ', curses.color_pair(1)) + win.border() + win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5)) + next(blank_line) + win.addstr(next(y), x, self.format_row('UUID', 'TASK', + 'WORKER', 'TIME', 'STATE'), + curses.A_BOLD | curses.A_UNDERLINE) + tasks = self.tasks + if tasks: + for row, (uuid, task) in enumerate(tasks): + if row > self.display_height: + break - if task.uuid: - lineno = y() - self.display_task_row(lineno, task) + if task.uuid: + lineno = next(y) + self.display_task_row(lineno, task) - # -- Footer - blank_line() - win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4) + # -- Footer + next(blank_line) + win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4) - # Selected Task Info - if self.selected_task: - win.addstr(my - 5, x, self.selected_str, curses.A_BOLD) - info = 'Missing extended info' - detail = '' - try: - selection = self.state.tasks[self.selected_task] - except KeyError: - pass + # Selected Task Info + if self.selected_task: + win.addstr(my - 5, x, self.selected_str, curses.A_BOLD) + info = 'Missing extended info' + detail = '' + try: + selection = 
self.state.tasks[self.selected_task] + except KeyError: + pass + else: + info = selection.info() + if 'runtime' in info: + info['runtime'] = '{0:.2f}'.format(info['runtime']) + if 'result' in info: + info['result'] = abbr(info['result'], 16) + info = ' '.join( + '{0}={1}'.format(key, value) + for key, value in items(info) + ) + detail = '... -> key i' + infowin = abbr(info, + self.screen_width - len(self.selected_str) - 2, + detail) + win.addstr(my - 5, x + len(self.selected_str), infowin) + # Make ellipsis bold + if detail in infowin: + detailpos = len(infowin) - len(detail) + win.addstr(my - 5, x + len(self.selected_str) + detailpos, + detail, curses.A_BOLD) else: - info = selection.info() - if 'runtime' in info: - info['runtime'] = '%.2fs' % info['runtime'] - if 'result' in info: - info['result'] = abbr(info['result'], 16) - info = ' '.join( - '%s=%s' % (key, value) for key, value in info.items()) - detail = '... -> key i' - infowin = abbr(info, - self.screen_width - len(self.selected_str) - 2, - detail) - win.addstr(my - 5, x + len(self.selected_str), infowin) - # Make ellipsis bold - if detail in infowin: - detailpos = len(infowin) - len(detail) - win.addstr(my - 5, x + len(self.selected_str) + detailpos, - detail, curses.A_BOLD) - else: - win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL) + win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL) - # Workers - if self.workers: - win.addstr(my - 4, x, self.online_str, curses.A_BOLD) - win.addstr(my - 4, x + len(self.online_str), - ', '.join(sorted(self.workers)), curses.A_NORMAL) - else: - win.addstr(my - 4, x, 'No workers discovered.') + # Workers + if self.workers: + win.addstr(my - 4, x, self.online_str, curses.A_BOLD) + win.addstr(my - 4, x + len(self.online_str), + ', '.join(sorted(self.workers)), curses.A_NORMAL) + else: + win.addstr(my - 4, x, 'No workers discovered.') - # Info - win.addstr(my - 3, x, self.info_str, curses.A_BOLD) - win.addstr( - my - 3, x + len(self.info_str), - 'events:%s tasks:%s workers:%s/%s' % ( - self.state.event_count, self.state.task_count, - len([w for w in self.state.workers.values() - if w.alive]), - len(self.state.workers)), - curses.A_DIM, - ) + # Info + win.addstr(my - 3, x, self.info_str, curses.A_BOLD) + win.addstr( + my - 3, x + len(self.info_str), + STATUS_SCREEN.format( + s=self.state, + w_alive=len([w for w in values(self.state.workers) + if w.alive]), + w_all=len(self.state.workers), + ), + curses.A_DIM, + ) - # Help - self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD) - self.safe_add_str(my - 2, x + len(self.help_title), self.help, - curses.A_DIM) - win.refresh() + # Help + self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD) + self.safe_add_str(my - 2, x + len(self.help_title), self.help, + curses.A_DIM) + win.refresh() def safe_add_str(self, y, x, string, *args, **kwargs): if x + len(string) > self.screen_width: @@ -426,46 +442,48 @@ class CursesMonitor(object): # pragma: no cover self.win.addstr(y, x, string, *args, **kwargs) def init_screen(self): - self.win = curses.initscr() - self.win.nodelay(True) - self.win.keypad(True) - curses.start_color() - curses.init_pair(1, self.foreground, self.background) - # exception states - curses.init_pair(2, curses.COLOR_RED, self.background) - # successful state - curses.init_pair(3, curses.COLOR_GREEN, self.background) - # revoked state - curses.init_pair(4, curses.COLOR_MAGENTA, self.background) - # greeting - curses.init_pair(5, curses.COLOR_BLUE, self.background) - # started state - curses.init_pair(6, 
curses.COLOR_YELLOW, self.foreground) + with self.lock: + self.win = curses.initscr() + self.win.nodelay(True) + self.win.keypad(True) + curses.start_color() + curses.init_pair(1, self.foreground, self.background) + # exception states + curses.init_pair(2, curses.COLOR_RED, self.background) + # successful state + curses.init_pair(3, curses.COLOR_GREEN, self.background) + # revoked state + curses.init_pair(4, curses.COLOR_MAGENTA, self.background) + # greeting + curses.init_pair(5, curses.COLOR_BLUE, self.background) + # started state + curses.init_pair(6, curses.COLOR_YELLOW, self.foreground) - self.state_colors = {states.SUCCESS: curses.color_pair(3), - states.REVOKED: curses.color_pair(4), - states.STARTED: curses.color_pair(6)} - for state in states.EXCEPTION_STATES: - self.state_colors[state] = curses.color_pair(2) + self.state_colors = {states.SUCCESS: curses.color_pair(3), + states.REVOKED: curses.color_pair(4), + states.STARTED: curses.color_pair(6)} + for state in states.EXCEPTION_STATES: + self.state_colors[state] = curses.color_pair(2) - curses.cbreak() + curses.cbreak() def resetscreen(self): - curses.nocbreak() - self.win.keypad(False) - curses.echo() - curses.endwin() + with self.lock: + curses.nocbreak() + self.win.keypad(False) + curses.echo() + curses.endwin() def nap(self): curses.napms(self.screen_delay) @property def tasks(self): - return self.state.tasks_by_timestamp()[:self.limit] + return list(self.state.tasks_by_time(limit=self.limit)) @property def workers(self): - return [hostname for hostname, w in self.state.workers.items() + return [hostname for hostname, w in items(self.state.workers) if w.alive] @@ -485,11 +503,11 @@ class DisplayThread(threading.Thread): # pragma: no cover def capture_events(app, state, display): # pragma: no cover def on_connection_error(exc, interval): - sys.stderr.write('Connection Error: %r. Retry in %ss.' % ( - exc, interval)) + print('Connection Error: {0!r}. Retry in {1}s.'.format( + exc, interval), file=sys.stderr) while 1: - sys.stderr.write('-> evtop: starting capture...\n') + print('-> evtop: starting capture...', file=sys.stderr) with app.connection() as conn: try: conn.ensure_connection(on_connection_error, @@ -497,16 +515,15 @@ def capture_events(app, state, display): # pragma: no cover recv = app.events.Receiver(conn, handlers={'*': state.event}) display.resetscreen() display.init_screen() - with recv.consumer(): - recv.drain_events(timeout=1, ignore_timeouts=True) - except (conn.connection_errors, conn.channel_errors), exc: - sys.stderr.write('Connection lost: %r' % (exc, )) + recv.capture() + except conn.connection_errors + conn.channel_errors as exc: + print('Connection lost: {0!r}'.format(exc), file=sys.stderr) def evtop(app=None): # pragma: no cover app = app_or_default(app) state = app.events.State() - display = CursesMonitor(state, app=app) + display = CursesMonitor(state, app) display.init_screen() refresher = DisplayThread(display) refresher.start() diff --git a/awx/lib/site-packages/celery/events/dumper.py b/awx/lib/site-packages/celery/events/dumper.py index 02b5f1ade5..2a3fd41ca1 100644 --- a/awx/lib/site-packages/celery/events/dumper.py +++ b/awx/lib/site-packages/celery/events/dumper.py @@ -7,16 +7,18 @@ as they happen. Think of it like a `tcpdump` for Celery events. 
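# Sketch (not part of the patch) of the retry loop's error handling in
# capture_events() above: kombu connections expose connection_errors
# and channel_errors as tuples of exception classes, so concatenating
# them yields a single except-clause target.  The classes here are
# local stand-ins.
class _ConnectionError(Exception):
    pass

class _ChannelError(Exception):
    pass

connection_errors = (_ConnectionError, )
channel_errors = (_ChannelError, )

for exc_cls in (_ConnectionError, _ChannelError):
    try:
        raise exc_cls('lost')
    except connection_errors + channel_errors as exc:
        print('Connection lost: {0!r}'.format(exc))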
""" -from __future__ import absolute_import +from __future__ import absolute_import, print_function import sys from datetime import datetime from celery.app import app_or_default -from celery.datastructures import LRUCache +from celery.utils.functional import LRUCache from celery.utils.timeutils import humanize_seconds +__all__ = ['Dumper', 'evdump'] + TASK_NAMES = LRUCache(limit=0xFFF) HUMAN_TYPES = {'worker-offline': 'shutdown', @@ -36,17 +38,13 @@ def humanize_type(type): return type.lower().replace('-', ' ') -def say(msg, out=sys.stdout): - out.write(msg + '\n') - - class Dumper(object): def __init__(self, out=sys.stdout): self.out = out def say(self, msg): - say(msg, out=self.out) + print(msg, file=self.out) def on_event(self, ev): timestamp = datetime.utcfromtimestamp(ev.pop('timestamp')) @@ -55,32 +53,37 @@ class Dumper(object): if type.startswith('task-'): uuid = ev.pop('uuid') if type in ('task-received', 'task-sent'): - task = TASK_NAMES[uuid] = '%s(%s) args=%s kwargs=%s' % ( - ev.pop('name'), uuid, - ev.pop('args'), - ev.pop('kwargs')) + task = TASK_NAMES[uuid] = '{0}({1}) args={2} kwargs={3}' \ + .format(ev.pop('name'), uuid, + ev.pop('args'), + ev.pop('kwargs')) else: task = TASK_NAMES.get(uuid, '') return self.format_task_event(hostname, timestamp, type, task, ev) - fields = ', '.join('%s=%s' % (key, ev[key]) for key in sorted(ev)) + fields = ', '.join( + '{0}={1}'.format(key, ev[key]) for key in sorted(ev) + ) sep = fields and ':' or '' - self.say('%s [%s] %s%s %s' % (hostname, timestamp, - humanize_type(type), sep, fields)) + self.say('{0} [{1}] {2}{3} {4}'.format( + hostname, timestamp, humanize_type(type), sep, fields), + ) - def format_task_event(self, hostname, timestamp, type, task, ev): - fields = ', '.join('%s=%s' % (key, ev[key]) for key in sorted(ev)) + def format_task_event(self, hostname, timestamp, type, task, event): + fields = ', '.join( + '{0}={1}'.format(key, event[key]) for key in sorted(event) + ) sep = fields and ':' or '' - self.say('%s [%s] %s%s %s %s' % ( - hostname, timestamp, humanize_type(type), sep, task, fields, - )) + self.say('{0} [{1}] {2}{3} {4} {5}'.format( + hostname, timestamp, humanize_type(type), sep, task, fields), + ) def evdump(app=None, out=sys.stdout): app = app_or_default(app) dumper = Dumper(out=out) dumper.say('-> evdump: starting capture...') - conn = app.connection() + conn = app.connection().clone() def _error_handler(exc, interval): dumper.say(CONNECTION_ERROR % ( @@ -89,7 +92,6 @@ def evdump(app=None, out=sys.stdout): while 1: try: - conn = conn.clone() conn.ensure_connection(_error_handler) recv = app.events.Receiver(conn, handlers={'*': dumper.on_event}) recv.capture() diff --git a/awx/lib/site-packages/celery/events/snapshot.py b/awx/lib/site-packages/celery/events/snapshot.py index 9839f24d77..0dd41554c8 100644 --- a/awx/lib/site-packages/celery/events/snapshot.py +++ b/awx/lib/site-packages/celery/events/snapshot.py @@ -16,17 +16,19 @@ from kombu.utils.limits import TokenBucket from celery import platforms from celery.app import app_or_default -from celery.utils import timer2 +from celery.utils.timer2 import Timer from celery.utils.dispatch import Signal from celery.utils.imports import instantiate from celery.utils.log import get_logger from celery.utils.timeutils import rate +__all__ = ['Polaroid', 'evcam'] + logger = get_logger('celery.evcam') class Polaroid(object): - timer = timer2 + timer = None shutter_signal = Signal(providing_args=('state', )) cleanup_signal = Signal() clear_after = False @@ -40,15 +42,15 @@ 
class Polaroid(object): self.state = state self.freq = freq self.cleanup_freq = cleanup_freq - self.timer = timer or self.timer + self.timer = timer or self.timer or Timer() self.logger = logger self.maxrate = maxrate and TokenBucket(rate(maxrate)) def install(self): - self._tref = self.timer.apply_interval(self.freq * 1000.0, - self.capture) - self._ctref = self.timer.apply_interval(self.cleanup_freq * 1000.0, - self.cleanup) + self._tref = self.timer.call_repeatedly(self.freq, self.capture) + self._ctref = self.timer.call_repeatedly( + self.cleanup_freq, self.cleanup, + ) def on_shutter(self, state): pass @@ -94,9 +96,8 @@ def evcam(camera, freq=1.0, maxrate=None, loglevel=0, app.log.setup_logging_subsystem(loglevel, logfile) - logger.info( - '-> evcam: Taking snapshots with %s (every %s secs.)\n' % ( - camera, freq)) + print('-> evcam: Taking snapshots with {0} (every {1} secs.)'.format( + camera, freq)) state = app.events.State() cam = instantiate(camera, state, app=app, freq=freq, maxrate=maxrate, timer=timer) diff --git a/awx/lib/site-packages/celery/events/state.py b/awx/lib/site-packages/celery/events/state.py index 8c129c2a38..d68f8bf32b 100644 --- a/awx/lib/site-packages/celery/events/state.py +++ b/awx/lib/site-packages/celery/events/state.py @@ -17,67 +17,121 @@ """ from __future__ import absolute_import -from __future__ import with_statement -import heapq import threading +from datetime import datetime +from heapq import heappush, heappop +from itertools import islice from time import time +from kombu.clocks import timetuple from kombu.utils import kwdict from celery import states -from celery.datastructures import AttributeDict, LRUCache +from celery.datastructures import AttributeDict +from celery.five import items, values +from celery.utils.functional import LRUCache +from celery.utils.log import get_logger # The window (in percentage) is added to the workers heartbeat # frequency. If the time between updates exceeds this window, # then the worker is considered to be offline. HEARTBEAT_EXPIRE_WINDOW = 200 +# Max drift between event timestamp and time of event received +# before we alert that clocks may be unsynchronized. +HEARTBEAT_DRIFT_MAX = 16 + +DRIFT_WARNING = """\ +Substantial drift from %s may mean clocks are out of sync. Current drift is +%s seconds. 
[orig: %s recv: %s] +""" + +logger = get_logger(__name__) +warn = logger.warning + +R_STATE = '' +R_WORKER = ' self.heartbeat_max: - self.heartbeats = self.heartbeats[self.heartbeat_max:] + def update_heartbeat(self, received, timestamp): + if not received or not timestamp: + return + drift = abs(int(received) - int(timestamp)) + if drift > HEARTBEAT_DRIFT_MAX: + warn(DRIFT_WARNING, self.hostname, drift, + datetime.fromtimestamp(received), + datetime.fromtimestamp(timestamp)) + heartbeats, hbmax = self.heartbeats, self.heartbeat_max + if not heartbeats or (received and received > heartbeats[-1]): + heappush(heartbeats, received) + if len(heartbeats) > hbmax: + heartbeats[:] = heartbeats[hbmax:] def __repr__(self): - return '' % (self.name, self.uuid, self.state) + return R_TASK.format(self) @property def ready(self): @@ -217,7 +278,7 @@ class State(object): if workers is None else workers) self.tasks = (LRUCache(max_tasks_in_memory) if tasks is None else tasks) - self._taskheap = None # reserved for __reduce__ in 3.1 + self._taskheap = [] if taskheap is None else taskheap self.max_workers_in_memory = max_workers_in_memory self.max_tasks_in_memory = max_tasks_in_memory self._mutex = threading.Lock() @@ -247,6 +308,7 @@ class State(object): self.tasks.update(in_progress) else: self.tasks.clear() + self._taskheap[:] = [] def _clear(self, ready=True): self.workers.clear() @@ -259,38 +321,56 @@ class State(object): return self._clear(ready) def get_or_create_worker(self, hostname, **kwargs): - """Get or create worker by hostname.""" + """Get or create worker by hostname. + + Return tuple of ``(worker, was_created)``. + """ try: worker = self.workers[hostname] worker.update(kwargs) + return worker, False except KeyError: worker = self.workers[hostname] = Worker( hostname=hostname, **kwargs) - return worker + return worker, True def get_or_create_task(self, uuid): """Get or create task by uuid.""" try: - return self.tasks[uuid] + return self.tasks[uuid], False except KeyError: task = self.tasks[uuid] = Task(uuid=uuid) - return task + return task, True def worker_event(self, type, fields): """Process worker event.""" - hostname = fields.pop('hostname', None) - if hostname: - worker = self.get_or_create_worker(hostname) - handler = getattr(worker, 'on_%s' % type, None) + try: + hostname = fields['hostname'] + except KeyError: + pass + else: + worker, created = self.get_or_create_worker(hostname) + handler = getattr(worker, 'on_' + type, None) if handler: handler(**fields) + return worker, created - def task_event(self, type, fields): + def task_event(self, type, fields, timetuple=timetuple): """Process task event.""" uuid = fields['uuid'] hostname = fields['hostname'] - worker = self.get_or_create_worker(hostname) - task = self.get_or_create_task(uuid) + worker, _ = self.get_or_create_worker(hostname) + task, created = self.get_or_create_task(uuid) + task.worker = worker + maxtasks = self.max_tasks_in_memory * 2 + + taskheap = self._taskheap + timestamp = fields.get('timestamp') or 0 + clock = 0 if type == 'sent' else fields.get('clock') + heappush(taskheap, timetuple(clock, timestamp, worker.id, task)) + if len(taskheap) > maxtasks: + heappop(taskheap) + handler = getattr(task, 'on_' + type, None) if type == 'received': self.task_count += 1 @@ -298,7 +378,7 @@ class State(object): handler(**fields) else: task.on_unknown_event(type, **fields) - task.worker = worker + return created def event(self, event): with self._mutex: @@ -316,62 +396,57 @@ class State(object): self.event_callback(self, event) 
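# Illustrative sketch (not part of the patch): the new ``_taskheap``
# above keeps events ordered by (logical clock, timestamp) and bounded
# to roughly twice max_tasks_in_memory by popping the oldest entry
# whenever the heap grows past the limit.  A plain tuple stands in for
# kombu.clocks.timetuple here.
from heapq import heappush, heappop

maxtasks = 4
taskheap = []

for clock, uuid in enumerate(['a', 'b', 'c', 'd', 'e', 'f']):
    heappush(taskheap, (clock, 1384467300.0 + clock, uuid))
    if len(taskheap) > maxtasks:
        heappop(taskheap)  # discard the oldest event

# Only the four most recent events remain, oldest first:
assert [entry[2] for entry in taskheap] == ['c', 'd', 'e', 'f']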
def itertasks(self, limit=None): - for index, row in enumerate(self.tasks.iteritems()): + for index, row in enumerate(items(self.tasks)): yield row if limit and index + 1 >= limit: break - def tasks_by_timestamp(self, limit=None): - """Get tasks by timestamp. - - Returns a list of `(uuid, task)` tuples. - - """ - return self._sort_tasks_by_time(self.itertasks(limit)) - - def _sort_tasks_by_time(self, tasks): - """Sort task items by time.""" - return sorted(tasks, key=lambda t: t[1].timestamp, - reverse=True) + def tasks_by_time(self, limit=None): + """Generator giving tasks ordered by time, + in ``(uuid, Task)`` tuples.""" + seen = set() + for evtup in islice(reversed(self._taskheap), 0, limit): + uuid = evtup[3].uuid + if uuid not in seen: + yield uuid, evtup[3] + seen.add(uuid) + tasks_by_timestamp = tasks_by_time def tasks_by_type(self, name, limit=None): """Get all tasks by type. - Returns a list of `(uuid, task)` tuples. + Return a list of ``(uuid, Task)`` tuples. """ - sorted_tasks = self._sort_tasks_by_time( - (uuid, task) for uuid, task in self.tasks.iteritems() - if task.name == name) - - return sorted_tasks[0:limit or None] + return islice( + ((uuid, task) for uuid, task in self.tasks_by_time() + if task.name == name), + 0, limit, + ) def tasks_by_worker(self, hostname, limit=None): """Get all tasks by worker. - Returns a list of `(uuid, task)` tuples. - """ - return self._sort_tasks_by_time( - (uuid, task) for uuid, task in self.itertasks(limit) - if task.worker.hostname == hostname) + return islice( + ((uuid, task) for uuid, task in self.tasks_by_time() + if task.worker.hostname == hostname), + 0, limit, + ) def task_types(self): - """Returns a list of all seen task types.""" - return list(sorted(set(task.name for task in self.tasks.itervalues()))) + """Return a list of all seen task types.""" + return list(sorted(set(task.name for task in values(self.tasks)))) def alive_workers(self): - """Returns a list of (seemingly) alive workers.""" - return [w for w in self.workers.values() if w.alive] + """Return a list of (seemingly) alive workers.""" + return [w for w in values(self.workers) if w.alive] def __repr__(self): - return '' % (self.event_count, - self.task_count) + return R_STATE.format(self) def __reduce__(self): return self.__class__, ( - self.event_callback, self.workers, self.tasks, None, + self.event_callback, self.workers, self.tasks, self._taskheap, self.max_workers_in_memory, self.max_tasks_in_memory, ) - -state = State() diff --git a/awx/lib/site-packages/celery/exceptions.py b/awx/lib/site-packages/celery/exceptions.py index cd5dc66a01..25c7d4f4ab 100644 --- a/awx/lib/site-packages/celery/exceptions.py +++ b/awx/lib/site-packages/celery/exceptions.py @@ -8,12 +8,23 @@ """ from __future__ import absolute_import +from .five import string_t + from billiard.exceptions import ( # noqa SoftTimeLimitExceeded, TimeLimitExceeded, WorkerLostError, Terminated, ) +__all__ = ['SecurityError', 'Ignore', 'SystemTerminate', 'QueueNotFound', + 'ImproperlyConfigured', 'NotRegistered', 'AlreadyRegistered', + 'TimeoutError', 'MaxRetriesExceededError', 'Retry', + 'TaskRevokedError', 'NotConfigured', 'AlwaysEagerIgnored', + 'InvalidTaskError', 'ChordError', 'CPendingDeprecationWarning', + 'CDeprecationWarning', 'FixupWarning', 'DuplicateNodenameWarning', + 'SoftTimeLimitExceeded', 'TimeLimitExceeded', 'WorkerLostError', + 'Terminated'] + UNREGISTERED_FMT = """\ -Task of kind %s is not registered, please make sure it's imported.\ +Task of kind {0} is not registered, please make sure it's 
imported.\ """ @@ -29,6 +40,18 @@ class Ignore(Exception): """A task can raise this to ignore doing state updates.""" +class Reject(Exception): + """A task can raise this if it wants to reject/requeue the message.""" + + def __init__(self, reason=None, requeue=False): + self.reason = reason + self.requeue = requeue + super(Reject, self).__init__(reason, requeue) + + def __repr__(self): + return 'reject requeue=%s: %s' % (self.requeue, self.reason) + + class SystemTerminate(SystemExit): """Signals that the worker should terminate.""" @@ -45,7 +68,7 @@ class NotRegistered(KeyError): """The task is not registered.""" def __repr__(self): - return UNREGISTERED_FMT % str(self) + return UNREGISTERED_FMT.format(self) class AlreadyRegistered(Exception): @@ -60,7 +83,7 @@ class MaxRetriesExceededError(Exception): """The tasks max restart limit has been exceeded.""" -class RetryTaskError(Exception): +class Retry(Exception): """The task is to be retried later.""" #: Optional message describing context of retry. @@ -75,7 +98,7 @@ class RetryTaskError(Exception): def __init__(self, message=None, exc=None, when=None, **kwargs): from kombu.utils.encoding import safe_repr self.message = message - if isinstance(exc, basestring): + if isinstance(exc, string_t): self.exc, self.excs = None, exc else: self.exc, self.excs = exc, safe_repr(exc) if exc else None @@ -84,18 +107,19 @@ class RetryTaskError(Exception): def humanize(self): if isinstance(self.when, int): - return 'in %ss' % self.when - return 'at %s' % (self.when, ) + return 'in {0.when}s'.format(self) + return 'at {0.when}'.format(self) def __str__(self): if self.message: return self.message if self.excs: - return 'Retry %s: %r' % (self.humanize(), self.excs) - return 'Retry %s' % self.humanize() + return 'Retry {0}: {1}'.format(self.humanize(), self.excs) + return 'Retry {0}'.format(self.humanize()) def __reduce__(self): return self.__class__, (self.message, self.excs, self.when) +RetryTaskError = Retry # XXX compat class TaskRevokedError(Exception): @@ -114,6 +138,14 @@ class InvalidTaskError(Exception): """The task has invalid data or is not properly constructed.""" +class IncompleteStream(Exception): + """Found the end of a stream of data, but the data is not yet complete.""" + + +class ChordError(Exception): + """A task part of the chord raised an exception.""" + + class CPendingDeprecationWarning(PendingDeprecationWarning): pass @@ -122,9 +154,9 @@ class CDeprecationWarning(DeprecationWarning): pass -class IncompleteStream(Exception): - """Found the end of a stream of data, but the data is not yet complete.""" +class FixupWarning(UserWarning): + pass -class ChordError(Exception): - """A task part of the chord raised an exception.""" +class DuplicateNodenameWarning(UserWarning): + """Multiple workers are using the same nodename.""" diff --git a/awx/lib/site-packages/celery/five.py b/awx/lib/site-packages/celery/five.py new file mode 100644 index 0000000000..23df8458a1 --- /dev/null +++ b/awx/lib/site-packages/celery/five.py @@ -0,0 +1,387 @@ +# -*- coding: utf-8 -*- +""" + celery.five + ~~~~~~~~~~~ + + Compatibility implementations of features + only available in newer Python versions. 
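# A short usage sketch for the helpers this module provides, assuming
# it is importable as celery.five per the path above: items()/values()
# return iterators on Python 2 and views on Python 3, and reraise()
# re-raises with an explicit traceback on both.
import sys
from celery.five import items, values, reraise

d = {'a': 1, 'b': 2}
assert sorted(items(d)) == [('a', 1), ('b', 2)]
assert sorted(values(d)) == [1, 2]

try:
    try:
        1 / 0
    except ZeroDivisionError:
        tp, value, tb = sys.exc_info()
        reraise(tp, value, tb)  # original traceback preserved
except ZeroDivisionError:
    pass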
+ + +""" +from __future__ import absolute_import + +__all__ = ['Counter', 'reload', 'UserList', 'UserDict', 'Queue', 'Empty', + 'zip_longest', 'StringIO', 'BytesIO', 'map', 'string', 'string_t', + 'long_t', 'text_t', 'range', 'int_types', 'items', 'keys', 'values', + 'nextfun', 'reraise', 'WhateverIO', 'with_metaclass', + 'OrderedDict', 'THREAD_TIMEOUT_MAX', 'format_d', + 'class_property', 'reclassmethod', 'create_module', + 'recreate_module', 'monotonic'] + +try: + from collections import Counter +except ImportError: # pragma: no cover + from collections import defaultdict + + def Counter(): # noqa + return defaultdict(int) + +############## py3k ######################################################### +import sys +PY3 = sys.version_info[0] == 3 + +try: + reload = reload # noqa +except NameError: # pragma: no cover + from imp import reload # noqa + +try: + from UserList import UserList # noqa +except ImportError: # pragma: no cover + from collections import UserList # noqa + +try: + from UserDict import UserDict # noqa +except ImportError: # pragma: no cover + from collections import UserDict # noqa + + +from kombu.five import monotonic + +if PY3: # pragma: no cover + import builtins + + from queue import Queue, Empty + from itertools import zip_longest + from io import StringIO, BytesIO + + map = map + string = str + string_t = str + long_t = int + text_t = str + range = range + int_types = (int, ) + + open_fqdn = 'builtins.open' + + def items(d): + return d.items() + + def keys(d): + return d.keys() + + def values(d): + return d.values() + + def nextfun(it): + return it.__next__ + + exec_ = getattr(builtins, 'exec') + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + class WhateverIO(StringIO): + + def write(self, data): + if isinstance(data, bytes): + data = data.encode() + StringIO.write(self, data) + +else: + import __builtin__ as builtins # noqa + from Queue import Queue, Empty # noqa + from itertools import imap as map, izip_longest as zip_longest # noqa + from StringIO import StringIO # noqa + string = unicode # noqa + string_t = basestring # noqa + text_t = unicode + long_t = long # noqa + range = xrange + int_types = (int, long) + + open_fqdn = '__builtin__.open' + + def items(d): # noqa + return d.iteritems() + + def keys(d): # noqa + return d.iterkeys() + + def values(d): # noqa + return d.itervalues() + + def nextfun(it): # noqa + return it.next + + def exec_(code, globs=None, locs=None): # pragma: no cover + """Execute code in a namespace.""" + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""") + + BytesIO = WhateverIO = StringIO # noqa + + +def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])): + """Class decorator to set metaclass. + + Works with both Python 2 and Python 3 and it does not add + an extra class in the lookup order like ``six.with_metaclass`` does + (that is -- it copies the original class instead of using inheritance). 
+ + """ + + def _clone_with_metaclass(Class): + attrs = dict((key, value) for key, value in items(vars(Class)) + if key not in skip_attrs) + return Type(Class.__name__, Class.__bases__, attrs) + + return _clone_with_metaclass + + +############## collections.OrderedDict ###################################### +# was moved to kombu +from kombu.utils.compat import OrderedDict # noqa + +############## threading.TIMEOUT_MAX ####################################### +try: + from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX +except ImportError: + THREAD_TIMEOUT_MAX = 1e10 # noqa + +############## format(int, ',d') ########################## + +if sys.version_info >= (2, 7): # pragma: no cover + def format_d(i): + return format(i, ',d') +else: # pragma: no cover + def format_d(i): # noqa + s = '%d' % i + groups = [] + while s and s[-1].isdigit(): + groups.append(s[-3:]) + s = s[:-3] + return s + ','.join(reversed(groups)) + + +############## Module Generation ########################## + +# Utilities to dynamically +# recreate modules, either for lazy loading or +# to create old modules at runtime instead of +# having them litter the source tree. +import operator +import sys + +# import fails in python 2.5. fallback to reduce in stdlib +try: + from functools import reduce +except ImportError: + pass + +from importlib import import_module +from types import ModuleType + +MODULE_DEPRECATED = """ +The module %s is deprecated and will be removed in a future version. +""" + +DEFAULT_ATTRS = set(['__file__', '__path__', '__doc__', '__all__']) + +# im_func is no longer available in Py3. +# instead the unbound method itself can be used. +if sys.version_info[0] == 3: # pragma: no cover + def fun_of_method(method): + return method +else: + def fun_of_method(method): # noqa + return method.im_func + + +def getappattr(path): + """Gets attribute from the current_app recursively, + e.g. 
getappattr('amqp.get_task_consumer')``.""" + from celery import current_app + return current_app._rgetattr(path) + + +def _compat_task_decorator(*args, **kwargs): + from celery import current_app + kwargs.setdefault('accept_magic_kwargs', True) + return current_app.task(*args, **kwargs) + + +def _compat_periodic_task_decorator(*args, **kwargs): + from celery.task import periodic_task + kwargs.setdefault('accept_magic_kwargs', True) + return periodic_task(*args, **kwargs) + + +COMPAT_MODULES = { + 'celery': { + 'execute': { + 'send_task': 'send_task', + }, + 'decorators': { + 'task': _compat_task_decorator, + 'periodic_task': _compat_periodic_task_decorator, + }, + 'log': { + 'get_default_logger': 'log.get_default_logger', + 'setup_logger': 'log.setup_logger', + 'setup_loggig_subsystem': 'log.setup_logging_subsystem', + 'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger', + }, + 'messaging': { + 'TaskPublisher': 'amqp.TaskPublisher', + 'TaskConsumer': 'amqp.TaskConsumer', + 'establish_connection': 'connection', + 'get_consumer_set': 'amqp.TaskConsumer', + }, + 'registry': { + 'tasks': 'tasks', + }, + }, + 'celery.task': { + 'control': { + 'broadcast': 'control.broadcast', + 'rate_limit': 'control.rate_limit', + 'time_limit': 'control.time_limit', + 'ping': 'control.ping', + 'revoke': 'control.revoke', + 'discard_all': 'control.purge', + 'inspect': 'control.inspect', + }, + 'schedules': 'celery.schedules', + 'chords': 'celery.canvas', + } +} + + +class class_property(object): + + def __init__(self, getter=None, setter=None): + if getter is not None and not isinstance(getter, classmethod): + getter = classmethod(getter) + if setter is not None and not isinstance(setter, classmethod): + setter = classmethod(setter) + self.__get = getter + self.__set = setter + + info = getter.__get__(object) # just need the info attrs. 
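        # ``getter`` has just been wrapped in classmethod above, and a
        # classmethod object does not expose the wrapped function's
        # __doc__/__name__/__module__ directly; binding it with __get__
        # yields a bound method whose metadata can be copied onto the
        # descriptor below.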
+ self.__doc__ = info.__doc__ + self.__name__ = info.__name__ + self.__module__ = info.__module__ + + def __get__(self, obj, type=None): + if obj and type is None: + type = obj.__class__ + return self.__get.__get__(obj, type)() + + def __set__(self, obj, value): + if obj is None: + return self + return self.__set.__get__(obj)(value) + + def setter(self, setter): + return self.__class__(self.__get, setter) + + +def reclassmethod(method): + return classmethod(fun_of_method(method)) + + +class MagicModule(ModuleType): + _compat_modules = () + _all_by_module = {} + _direct = {} + _object_origins = {} + + def __getattr__(self, name): + if name in self._object_origins: + module = __import__(self._object_origins[name], None, None, [name]) + for item in self._all_by_module[module.__name__]: + setattr(self, item, getattr(module, item)) + return getattr(module, name) + elif name in self._direct: # pragma: no cover + module = __import__(self._direct[name], None, None, [name]) + setattr(self, name, module) + return module + return ModuleType.__getattribute__(self, name) + + def __dir__(self): + return list(set(self.__all__) | DEFAULT_ATTRS) + + def __reduce__(self): + return import_module, (self.__name__, ) + + +def create_module(name, attrs, cls_attrs=None, pkg=None, + base=MagicModule, prepare_attr=None): + fqdn = '.'.join([pkg.__name__, name]) if pkg else name + cls_attrs = {} if cls_attrs is None else cls_attrs + pkg, _, modname = name.rpartition('.') + cls_attrs['__module__'] = pkg + + attrs = dict((attr_name, prepare_attr(attr) if prepare_attr else attr) + for attr_name, attr in items(attrs)) + module = sys.modules[fqdn] = type(modname, (base, ), cls_attrs)(fqdn) + module.__dict__.update(attrs) + return module + + +def recreate_module(name, compat_modules=(), by_module={}, direct={}, + base=MagicModule, **attrs): + old_module = sys.modules[name] + origins = get_origins(by_module) + compat_modules = COMPAT_MODULES.get(name, ()) + + cattrs = dict( + _compat_modules=compat_modules, + _all_by_module=by_module, _direct=direct, + _object_origins=origins, + __all__=tuple(set(reduce( + operator.add, + [tuple(v) for v in [compat_modules, origins, direct, attrs]], + ))), + ) + new_module = create_module(name, attrs, cls_attrs=cattrs, base=base) + new_module.__dict__.update(dict((mod, get_compat_module(new_module, mod)) + for mod in compat_modules)) + return old_module, new_module + + +def get_compat_module(pkg, name): + from .local import Proxy + + def prepare(attr): + if isinstance(attr, string_t): + return Proxy(getappattr, (attr, )) + return attr + + attrs = COMPAT_MODULES[pkg.__name__][name] + if isinstance(attrs, string_t): + fqdn = '.'.join([pkg.__name__, name]) + module = sys.modules[fqdn] = import_module(attrs) + return module + attrs['__all__'] = list(attrs) + return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare) + + +def get_origins(defs): + origins = {} + for module, attrs in items(defs): + origins.update(dict((attr, module) for attr in attrs)) + return origins diff --git a/awx/lib/site-packages/celery/tests/utilities/__init__.py b/awx/lib/site-packages/celery/fixups/__init__.py similarity index 100% rename from awx/lib/site-packages/celery/tests/utilities/__init__.py rename to awx/lib/site-packages/celery/fixups/__init__.py diff --git a/awx/lib/site-packages/celery/fixups/django.py b/awx/lib/site-packages/celery/fixups/django.py new file mode 100644 index 0000000000..a71b329d75 --- /dev/null +++ b/awx/lib/site-packages/celery/fixups/django.py @@ -0,0 +1,204 @@ +from __future__ 
import absolute_import + +import os +import sys +import warnings + +from kombu.utils import symbol_by_name + +from datetime import datetime +from importlib import import_module + +from celery import signals +from celery.exceptions import FixupWarning + +__all__ = ['DjangoFixup', 'fixup'] + +ERR_NOT_INSTALLED = """\ +Environment variable DJANGO_SETTINGS_MODULE is defined +but Django is not installed. Will not apply Django fixups! +""" + + +def _maybe_close_fd(fh): + try: + os.close(fh.fileno()) + except (AttributeError, OSError, TypeError): + # TypeError added for celery#962 + pass + + +def fixup(app, env='DJANGO_SETTINGS_MODULE'): + SETTINGS_MODULE = os.environ.get(env) + if SETTINGS_MODULE and 'django' not in app.loader_cls.lower(): + try: + import django # noqa + except ImportError: + warnings.warn(FixupWarning(ERR_NOT_INSTALLED)) + else: + return DjangoFixup(app).install() + + +class DjangoFixup(object): + _db_recycles = 0 + + def __init__(self, app): + self.app = app + self.db_reuse_max = self.app.conf.get('CELERY_DB_REUSE_MAX', None) + self._db = import_module('django.db') + self._cache = import_module('django.core.cache') + self._settings = symbol_by_name('django.conf:settings') + self._mail_admins = symbol_by_name('django.core.mail:mail_admins') + + # Current time and date + try: + self._now = symbol_by_name('django.utils.timezone:now') + except ImportError: # pre django-1.4 + self._now = datetime.now # noqa + + # Database-related exceptions. + DatabaseError = symbol_by_name('django.db:DatabaseError') + try: + import MySQLdb as mysql + _my_database_errors = (mysql.DatabaseError, + mysql.InterfaceError, + mysql.OperationalError) + except ImportError: + _my_database_errors = () # noqa + try: + import psycopg2 as pg + _pg_database_errors = (pg.DatabaseError, + pg.InterfaceError, + pg.OperationalError) + except ImportError: + _pg_database_errors = () # noqa + try: + import sqlite3 + _lite_database_errors = (sqlite3.DatabaseError, + sqlite3.InterfaceError, + sqlite3.OperationalError) + except ImportError: + _lite_database_errors = () # noqa + try: + import cx_Oracle as oracle + _oracle_database_errors = (oracle.DatabaseError, + oracle.InterfaceError, + oracle.OperationalError) + except ImportError: + _oracle_database_errors = () # noqa + + try: + self._close_old_connections = symbol_by_name( + 'django.db:close_old_connections', + ) + except (ImportError, AttributeError): + self._close_old_connections = None + self.database_errors = ( + (DatabaseError, ) + + _my_database_errors + + _pg_database_errors + + _lite_database_errors + + _oracle_database_errors + ) + + def install(self): + # Need to add project directory to path + sys.path.append(os.getcwd()) + signals.beat_embedded_init.connect(self.close_database) + signals.worker_ready.connect(self.on_worker_ready) + signals.task_prerun.connect(self.on_task_prerun) + signals.task_postrun.connect(self.on_task_postrun) + signals.worker_init.connect(self.on_worker_init) + signals.worker_process_init.connect(self.on_worker_process_init) + + self.app.loader.now = self.now + self.app.loader.mail_admins = self.mail_admins + + return self + + def now(self, utc=False): + return datetime.utcnow() if utc else self._now() + + def mail_admins(self, subject, body, fail_silently=False, **kwargs): + return self._mail_admins(subject, body, fail_silently=fail_silently) + + def on_worker_init(self, **kwargs): + """Called when the worker starts. + + Automatically discovers any ``tasks.py`` files in the applications + listed in ``INSTALLED_APPS``. 
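# Sketch (not part of the patch) of the pattern DjangoFixup.__init__
# uses above to build database_errors: each optional driver contributes
# its exception classes only when it imports, and the per-driver tuples
# concatenate into a single except-clause target.
class _BaseDatabaseError(Exception):
    pass

try:
    import sqlite3
    _lite_database_errors = (sqlite3.DatabaseError,
                             sqlite3.InterfaceError,
                             sqlite3.OperationalError)
except ImportError:
    _lite_database_errors = ()

database_errors = (_BaseDatabaseError, ) + _lite_database_errors

try:
    raise _BaseDatabaseError('connection already closed')
except database_errors as exc:
    print('ignored: %r' % (exc, ))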
+ + """ + self.close_database() + self.close_cache() + + def on_worker_process_init(self, **kwargs): + # the parent process may have established these, + # so need to close them. + + # calling db.close() on some DB connections will cause + # the inherited DB conn to also get broken in the parent + # process so we need to remove it without triggering any + # network IO that close() might cause. + try: + for c in self._db.connections.all(): + if c and c.connection: + _maybe_close_fd(c.connection) + except AttributeError: + if self._db.connection and self._db.connection.connection: + _maybe_close_fd(self._db.connection.connection) + + # use the _ version to avoid DB_REUSE preventing the conn.close() call + self._close_database() + self.close_cache() + + def on_task_prerun(self, sender, **kwargs): + """Called before every task.""" + if not getattr(sender.request, 'is_eager', False): + self.close_database() + + def on_task_postrun(self, sender, **kwargs): + # See http://groups.google.com/group/django-users/ + # browse_thread/thread/78200863d0c07c6d/ + if not getattr(sender.request, 'is_eager', False): + self.close_database() + self.close_cache() + + def close_database(self, **kwargs): + if self._close_old_connections: + return self._close_old_connections() # Django 1.6 + if not self.db_reuse_max: + return self._close_database() + if self._db_recycles >= self.db_reuse_max * 2: + self._db_recycles = 0 + self._close_database() + self._db_recycles += 1 + + def _close_database(self): + try: + funs = [conn.close for conn in self._db.connections] + except AttributeError: + if hasattr(self._db, 'close_old_connections'): # django 1.6 + funs = [self._db.close_old_connections] + else: + # pre multidb, pending deprication in django 1.6 + funs = [self._db.close_connection] + + for close in funs: + try: + close() + except self.database_errors as exc: + str_exc = str(exc) + if 'closed' not in str_exc and 'not connected' not in str_exc: + raise + + def close_cache(self): + try: + self._cache.cache.close() + except (TypeError, AttributeError): + pass + + def on_worker_ready(self, **kwargs): + if self._settings.DEBUG: + warnings.warn('Using settings.DEBUG leads to a memory leak, never ' + 'use this setting in production environments!') diff --git a/awx/lib/site-packages/celery/loaders/__init__.py b/awx/lib/site-packages/celery/loaders/__init__.py index 1bd2baafcb..0ff5a6f747 100644 --- a/awx/lib/site-packages/celery/loaders/__init__.py +++ b/awx/lib/site-packages/celery/loaders/__init__.py @@ -13,6 +13,8 @@ from celery._state import current_app from celery.utils import deprecated from celery.utils.imports import symbol_by_name, import_from_cwd +__all__ = ['get_loader_cls'] + LOADER_ALIASES = {'app': 'celery.loaders.app:AppLoader', 'default': 'celery.loaders.default:Loader', 'django': 'djcelery.loaders:DjangoLoader'} diff --git a/awx/lib/site-packages/celery/loaders/app.py b/awx/lib/site-packages/celery/loaders/app.py index 54f6853a0e..87f034bf61 100644 --- a/awx/lib/site-packages/celery/loaders/app.py +++ b/awx/lib/site-packages/celery/loaders/app.py @@ -10,6 +10,8 @@ from __future__ import absolute_import from .base import BaseLoader +__all__ = ['AppLoader'] + class AppLoader(BaseLoader): pass diff --git a/awx/lib/site-packages/celery/loaders/base.py b/awx/lib/site-packages/celery/loaders/base.py index 1c3abd467a..4caed2f318 100644 --- a/awx/lib/site-packages/celery/loaders/base.py +++ b/awx/lib/site-packages/celery/loaders/base.py @@ -9,6 +9,7 @@ from __future__ import absolute_import import anyjson +import 
imp as _imp import importlib import os import re @@ -19,26 +20,23 @@ from datetime import datetime from kombu.utils import cached_property from kombu.utils.encoding import safe_str -from celery.datastructures import DictAttribute -from celery.exceptions import ImproperlyConfigured +from celery.datastructures import DictAttribute, force_mapping +from celery.five import reraise, string_t +from celery.utils.functional import maybe_list from celery.utils.imports import ( import_from_cwd, symbol_by_name, NotAPackage, find_module, ) -from celery.utils.functional import maybe_list -ERROR_ENVVAR_NOT_SET = """\ -The environment variable %r is not set, -and as such the configuration could not be loaded. -Please set this variable and make it point to -a configuration module.""" +__all__ = ['BaseLoader'] -CONFIG_INVALID_NAME = """ -Error: Module '%(module)s' doesn't exist, or it's not a valid \ +_RACE_PROTECTION = False +CONFIG_INVALID_NAME = """\ +Error: Module '{module}' doesn't exist, or it's not a valid \ Python module name. """ -CONFIG_WITH_SUFFIX = CONFIG_INVALID_NAME + """ -Did you mean '%(suggest)s'? +CONFIG_WITH_SUFFIX = CONFIG_INVALID_NAME + """\ +Did you mean '{suggest}'? """ @@ -63,15 +61,13 @@ class BaseLoader(object): """ builtin_modules = frozenset() configured = False - error_envvar_not_set = ERROR_ENVVAR_NOT_SET override_backends = {} worker_initialized = False _conf = None - def __init__(self, app=None, **kwargs): - from celery.app import app_or_default - self.app = app_or_default(app) + def __init__(self, app, **kwargs): + self.app = app self.task_modules = set() def now(self, utc=True): @@ -136,39 +132,41 @@ class BaseLoader(object): def init_worker_process(self): self.on_worker_process_init() - def config_from_envvar(self, variable_name, silent=False): - module_name = os.environ.get(variable_name) - if not module_name: - if silent: - return False - raise ImproperlyConfigured(self.error_envvar_not_set % module_name) - return self.config_from_object(module_name, silent=silent) - def config_from_object(self, obj, silent=False): - if isinstance(obj, basestring): + if isinstance(obj, string_t): try: - if '.' in obj: - obj = symbol_by_name(obj, imp=self.import_from_cwd) - else: - obj = self.import_from_cwd(obj) + obj = self._smart_import(obj, imp=self.import_from_cwd) except (ImportError, AttributeError): if silent: return False raise - if not hasattr(obj, '__getitem__'): - obj = DictAttribute(obj) - self._conf = obj + self._conf = force_mapping(obj) return True + def _smart_import(self, path, imp=None): + imp = self.import_module if imp is None else imp + if ':' in path: + # Path includes attribute so can just jump here. + # e.g. ``os.path:abspath``. + return symbol_by_name(path, imp=imp) + + # Not sure if path is just a module name or if it includes an + # attribute name (e.g. ``os.path``, vs, ``os.path.abspath`` + try: + return imp(path) + except ImportError: + # Not a module name, so try module + attribute. 
+ return symbol_by_name(path, imp=imp) + def _import_config_module(self, name): try: self.find_module(name) except NotAPackage: if name.endswith('.py'): - raise NotAPackage, NotAPackage(CONFIG_WITH_SUFFIX % { - 'module': name, 'suggest': name[:-3]}), sys.exc_info()[2] - raise NotAPackage, NotAPackage( - CONFIG_INVALID_NAME % {'module': name}), sys.exc_info()[2] + reraise(NotAPackage, NotAPackage(CONFIG_WITH_SUFFIX.format( + module=name, suggest=name[:-3])), sys.exc_info()[2]) + reraise(NotAPackage, NotAPackage(CONFIG_INVALID_NAME.format( + module=name)), sys.exc_info()[2]) else: return self.import_from_cwd(name) @@ -188,7 +186,7 @@ class BaseLoader(object): def getarg(arg): """Parse a single configuration definition from - the command line.""" + the command-line.""" ## find key/value # ns.key=value|ns_key=value (case insensitive) @@ -215,11 +213,11 @@ class BaseLoader(object): else: try: value = NAMESPACES[ns][key].to_python(value) - except ValueError, exc: + except ValueError as exc: # display key name in error message. - raise ValueError('%r: %s' % (ns_key, exc)) + raise ValueError('{0!r}: {1}'.format(ns_key, exc)) return ns_key, value - return dict(getarg(v) for v in args) + return dict(getarg(arg) for arg in args) def mail_admins(self, subject, body, fail_silently=False, sender=None, to=None, host=None, port=None, @@ -234,16 +232,22 @@ class BaseLoader(object): use_tls=use_tls) mailer.send(message, fail_silently=fail_silently) - def read_configuration(self): + def read_configuration(self, env='CELERY_CONFIG_MODULE'): try: - custom_config = os.environ['CELERY_CONFIG_MODULE'] + custom_config = os.environ[env] except KeyError: pass else: - usercfg = self._import_config_module(custom_config) - return DictAttribute(usercfg) + if custom_config: + usercfg = self._import_config_module(custom_config) + return DictAttribute(usercfg) return {} + def autodiscover_tasks(self, packages, related_name='tasks'): + self.task_modules.update( + mod.__name__ for mod in autodiscover_tasks(packages, + related_name) if mod) + @property def conf(self): """Loader configuration.""" @@ -254,3 +258,32 @@ class BaseLoader(object): @cached_property def mail(self): return self.import_module('celery.utils.mail') + + +def autodiscover_tasks(packages, related_name='tasks'): + global _RACE_PROTECTION + + if _RACE_PROTECTION: + return + _RACE_PROTECTION = True + try: + return [find_related_module(pkg, related_name) for pkg in packages] + finally: + _RACE_PROTECTION = False + + +def find_related_module(package, related_name): + """Given a package name and a module name, tries to find that + module.""" + + try: + pkg_path = importlib.import_module(package).__path__ + except AttributeError: + return + + try: + _imp.find_module(related_name, pkg_path) + except ImportError: + return + + return importlib.import_module('{0}.{1}'.format(package, related_name)) diff --git a/awx/lib/site-packages/celery/loaders/default.py b/awx/lib/site-packages/celery/loaders/default.py index f695538cba..60714805e6 100644 --- a/awx/lib/site-packages/celery/loaders/default.py +++ b/awx/lib/site-packages/celery/loaders/default.py @@ -17,6 +17,8 @@ from celery.utils import strtobool from .base import BaseLoader +__all__ = ['Loader', 'DEFAULT_CONFIG_MODULE'] + DEFAULT_CONFIG_MODULE = 'celeryconfig' #: Warns if configuration file is missing if :envvar:`C_WNOCONF` is set. 
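# Sketch (not part of the patch) of the lookup order implemented by the
# new BaseLoader._smart_import() above: an explicit ``module:attribute``
# path is resolved directly, a bare module path is imported as a
# module, and anything else falls back to module-plus-attribute
# resolution.
from importlib import import_module

def smart_import(path):
    if ':' in path:
        module, _, attr = path.partition(':')
        return getattr(import_module(module), attr)
    try:
        return import_module(path)
    except ImportError:
        module, _, attr = path.rpartition('.')
        return getattr(import_module(module), attr)

import os.path
assert smart_import('os.path:abspath') is os.path.abspath
assert smart_import('os.path') is os.path
assert smart_import('os.path.abspath') is os.path.abspath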
@@ -29,7 +31,7 @@ class Loader(BaseLoader): def setup_settings(self, settingsdict): return DictAttribute(settingsdict) - def read_configuration(self): + def read_configuration(self, fail_silently=True): """Read configuration from :file:`celeryconfig.py` and configure celery and Django so it can be used by regular Python.""" configname = os.environ.get('CELERY_CONFIG_MODULE', @@ -37,11 +39,13 @@ class Loader(BaseLoader): try: usercfg = self._import_config_module(configname) except ImportError: + if not fail_silently: + raise # billiard sets this if forked using execv if C_WNOCONF and not os.environ.get('FORKED_BY_MULTIPROCESSING'): warnings.warn(NotConfigured( - 'No %r module found! Please make sure it exists and ' - 'is available to Python.' % (configname, ))) + 'No {module} module found! Please make sure it exists and ' + 'is available to Python.'.format(module=configname))) return self.setup_settings({}) else: self.configured = True diff --git a/awx/lib/site-packages/celery/local.py b/awx/lib/site-packages/celery/local.py index f54de36c4b..db4afd0b63 100644 --- a/awx/lib/site-packages/celery/local.py +++ b/awx/lib/site-packages/celery/local.py @@ -13,9 +13,10 @@ from __future__ import absolute_import import importlib -import sys -PY3 = sys.version_info[0] == 3 +from .five import long_t, string + +__all__ = ['Proxy', 'PromiseProxy', 'try_import', 'maybe_evaluate'] __module__ = __name__ # used by Proxy class body @@ -35,89 +36,11 @@ def _default_cls_attr(name, type_, cls_value): def __get__(self, obj, cls=None): return self.__getter(obj) if obj is not None else self - def __set__(self, obj, value): - raise AttributeError('readonly attribute') - return type(name, (type_, ), { - '__new__': __new__, '__get__': __get__, '__set__': __set__, + '__new__': __new__, '__get__': __get__, }) -class _cls_spec(str): - - def __new__(cls, getter): - s = str.__new__(cls, getter.__module__) - s.__getter = getter - return s - - def __get__(self, obj, cls=None): - if obj is not None: - return self.__getter(obj) - return self - - def __set__(self, obj, value): - raise AttributeError('cannot set attribute') - - -def symbol_by_name(name, aliases={}, imp=None, package=None, - sep='.', default=None, **kwargs): - """Get symbol by qualified name. - - The name should be the full dot-separated path to the class:: - - modulename.ClassName - - Example:: - - celery.concurrency.processes.TaskPool - ^- class name - - or using ':' to separate module and symbol:: - - celery.concurrency.processes:TaskPool - - If `aliases` is provided, a dict containing short name/long name - mappings, the name is looked up in the aliases first. - - Examples: - - >>> symbol_by_name('celery.concurrency.processes.TaskPool') - - - >>> symbol_by_name('default', { - ... 'default': 'celery.concurrency.processes.TaskPool'}) - - - # Does not try to look up non-string names. 
- >>> from celery.concurrency.processes import TaskPool - >>> symbol_by_name(TaskPool) is TaskPool - True - - """ - if imp is None: - imp = importlib.import_module - - if not isinstance(name, basestring): - return name # already a class - - name = aliases.get(name) or name - sep = ':' if ':' in name else sep - module_name, _, cls_name = name.rpartition(sep) - if not module_name: - cls_name, module_name = None, package if package else cls_name - try: - try: - module = imp(module_name, package=package, **kwargs) - except ValueError, exc: - raise ValueError, ValueError( - "Couldn't import %r: %s" % (name, exc)), sys.exc_info()[2] - return getattr(module, cls_name) if cls_name else module - except (ImportError, AttributeError): - if default is None: - raise - return default - - def try_import(module, default=None): """Try to import and return module, or return None if the module does not exist.""" @@ -132,15 +55,16 @@ class Proxy(object): # Code stolen from werkzeug.local.Proxy. __slots__ = ('__local', '__args', '__kwargs', '__dict__') - if not PY3: - __slots__ += ('__name__', ) - def __init__(self, local, args=None, kwargs=None, name=None): + def __init__(self, local, + args=None, kwargs=None, name=None, __doc__=None): object.__setattr__(self, '_Proxy__local', local) object.__setattr__(self, '_Proxy__args', args or ()) object.__setattr__(self, '_Proxy__kwargs', kwargs or {}) if name is not None: object.__setattr__(self, '__custom_name__', name) + if __doc__ is not None: + object.__setattr__(self, '__doc__', __doc__) @_default_cls_attr('name', str, __name__) def __name__(self): @@ -175,7 +99,7 @@ class Proxy(object): try: return getattr(loc, self.__name__) except AttributeError: - raise RuntimeError('no object bound to %s' % self.__name__) + raise RuntimeError('no object bound to {0.__name__}'.format(self)) @property def __dict__(self): @@ -188,18 +112,19 @@ class Proxy(object): try: obj = self._get_current_object() except RuntimeError: # pragma: no cover - return '<%s unbound>' % self.__class__.__name__ + return '<{0} unbound>'.format(self.__class__.__name__) return repr(obj) - def __nonzero__(self): + def __bool__(self): try: return bool(self._get_current_object()) except RuntimeError: # pragma: no cover return False + __nonzero__ = __bool__ # Py2 def __unicode__(self): try: - return unicode(self._get_current_object()) + return string(self._get_current_object()) except RuntimeError: # pragma: no cover return repr(self) @@ -263,12 +188,12 @@ class Proxy(object): __invert__ = lambda x: ~(x._get_current_object()) __complex__ = lambda x: complex(x._get_current_object()) __int__ = lambda x: int(x._get_current_object()) - __long__ = lambda x: long(x._get_current_object()) + __long__ = lambda x: long_t(x._get_current_object()) __float__ = lambda x: float(x._get_current_object()) __oct__ = lambda x: oct(x._get_current_object()) __hex__ = lambda x: hex(x._get_current_object()) __index__ = lambda x: x._get_current_object().__index__() - __coerce__ = lambda x, o: x.__coerce__(x, o) + __coerce__ = lambda x, o: x._get_current_object().__coerce__(o) __enter__ = lambda x: x._get_current_object().__enter__() __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) __reduce__ = lambda x: x._get_current_object().__reduce__() @@ -298,15 +223,21 @@ class PromiseProxy(Proxy): def __maybe_evaluate__(self): return self._get_current_object() - def __evaluate__(self): + def __evaluate__(self, + _clean=('_Proxy__local', + '_Proxy__args', + '_Proxy__kwargs')): try: thing = 
Proxy._get_current_object(self) object.__setattr__(self, '__thing', thing) return thing finally: - object.__delattr__(self, '_Proxy__local') - object.__delattr__(self, '_Proxy__args') - object.__delattr__(self, '_Proxy__kwargs') + for attr in _clean: + try: + object.__delattr__(self, attr) + except AttributeError: # pragma: no cover + # May mask errors so ignore + pass def maybe_evaluate(obj): diff --git a/awx/lib/site-packages/celery/platforms.py b/awx/lib/site-packages/celery/platforms.py index bc2b40a9a2..6163a66844 100644 --- a/awx/lib/site-packages/celery/platforms.py +++ b/awx/lib/site-packages/celery/platforms.py @@ -7,28 +7,42 @@ users, groups, and so on. """ -from __future__ import absolute_import -from __future__ import with_statement +from __future__ import absolute_import, print_function import atexit import errno +import math import os import platform as _platform -import shlex import signal as _signal import sys +from collections import namedtuple + from billiard import current_process +# fileno used to be in this module +from kombu.utils import maybe_fileno +from kombu.utils.compat import get_errno from kombu.utils.encoding import safe_str from contextlib import contextmanager from .local import try_import +from .five import items, range, reraise, string_t, zip_longest +from .utils.functional import uniq _setproctitle = try_import('setproctitle') resource = try_import('resource') pwd = try_import('pwd') grp = try_import('grp') +__all__ = ['EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM', + 'IS_OSX', 'IS_WINDOWS', 'pyimplementation', 'LockFailed', + 'get_fdmax', 'Pidfile', 'create_pidlock', + 'close_open_fds', 'DaemonContext', 'detached', 'parse_uid', + 'parse_gid', 'setgroups', 'initgroups', 'setgid', 'setuid', + 'maybe_drop_privileges', 'signals', 'set_process_title', + 'set_mp_process_title', 'get_errno_name', 'ignore_errno'] + # exitcodes EX_OK = getattr(os, 'EX_OK', 0) EX_FAILURE = 1 @@ -45,18 +59,14 @@ DAEMON_WORKDIR = '/' PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK)) -PIDLOCKED = """ERROR: Pidfile (%s) already exists. -Seems we're already running? (pid: %s)""" +PIDLOCKED = """ERROR: Pidfile ({0}) already exists. +Seems we're already running? (pid: {1})""" -try: - from io import UnsupportedOperation - FILENO_ERRORS = (AttributeError, UnsupportedOperation) -except ImportError: # Py2 - FILENO_ERRORS = (AttributeError, ) # noqa +_range = namedtuple('_range', ('start', 'stop')) def pyimplementation(): - """Returns string identifying the current Python implementation.""" + """Return string identifying the current Python implementation.""" if hasattr(_platform, 'python_implementation'): return _platform.python_implementation() elif sys.platform.startswith('java'): @@ -70,51 +80,24 @@ def pyimplementation(): return 'CPython' -def _find_option_with_arg(argv, short_opts=None, long_opts=None): - """Search argv for option specifying its short and longopt - alternatives. - - Returns the value of the option if found. 
- - """ - for i, arg in enumerate(argv): - if arg.startswith('-'): - if long_opts and arg.startswith('--'): - name, _, val = arg.partition('=') - if name in long_opts: - return val - if short_opts and arg in short_opts: - return argv[i + 1] - raise KeyError('|'.join(short_opts or [] + long_opts or [])) - - -def maybe_patch_concurrency(argv, short_opts=None, long_opts=None): - """With short and long opt alternatives that specify the command line - option to set the pool, this makes sure that anything that needs - to be patched is completed as early as possible. - (e.g. eventlet/gevent monkey patches).""" - try: - pool = _find_option_with_arg(argv, short_opts, long_opts) - except KeyError: - pass - else: - # set up eventlet/gevent environments ASAP. - from celery import concurrency - concurrency.get_implementation(pool) - - class LockFailed(Exception): """Raised if a pidlock can't be acquired.""" def get_fdmax(default=None): - """Returns the maximum number of open file descriptors + """Return the maximum number of open file descriptors on this system. :keyword default: Value returned if there's no file descriptor limit. """ + try: + return os.sysconf('SC_OPEN_MAX') + except: + pass + if resource is None: # Windows + return default fdmax = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if fdmax == resource.RLIM_INFINITY: return default @@ -142,13 +125,13 @@ class Pidfile(object): """Acquire lock.""" try: self.write_pid() - except OSError, exc: - raise LockFailed, LockFailed(str(exc)), sys.exc_info()[2] + except OSError as exc: + reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2]) return self __enter__ = acquire def is_locked(self): - """Returns true if the pid lock exists.""" + """Return true if the pid lock exists.""" return os.path.exists(self.path) def release(self, *args): @@ -157,32 +140,32 @@ class Pidfile(object): __exit__ = release def read_pid(self): - """Reads and returns the current pid.""" + """Read and return the current pid.""" with ignore_errno('ENOENT'): with open(self.path, 'r') as fh: line = fh.readline() if line.strip() == line: # must contain '\n' raise ValueError( - 'Partially written or invalid pidfile %r' % self.path) + 'Partial or invalid pidfile {0.path}'.format(self)) try: return int(line.strip()) except ValueError: raise ValueError( - 'pidfile %r contents invalid.' % self.path) + 'pidfile {0.path} contents invalid.'.format(self)) def remove(self): - """Removes the lock.""" + """Remove the lock.""" with ignore_errno(errno.ENOENT, errno.EACCES): os.unlink(self.path) def remove_if_stale(self): - """Removes the lock if the process is not running. + """Remove the lock if the process is not running. (does not respond to signals).""" try: pid = self.read_pid() - except ValueError, exc: - sys.stderr.write('Broken pidfile found. Removing it.\n') + except ValueError as exc: + print('Broken pidfile found. Removing it.', file=sys.stderr) self.remove() return True if not pid: @@ -191,16 +174,16 @@ class Pidfile(object): try: os.kill(pid, 0) - except os.error, exc: + except os.error as exc: if exc.errno == errno.ESRCH: - sys.stderr.write('Stale pidfile exists. Removing it.\n') + print('Stale pidfile exists. 
Removing it.', file=sys.stderr) self.remove() return True return False def write_pid(self): pid = os.getpid() - content = '%d\n' % (pid, ) + content = '{0}\n'.format(pid) pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE) pidfile = os.fdopen(pidfile_fd, 'w') @@ -253,31 +236,32 @@ def create_pidlock(pidfile): def _create_pidlock(pidfile): pidlock = Pidfile(pidfile) if pidlock.is_locked() and not pidlock.remove_if_stale(): - raise SystemExit(PIDLOCKED % (pidfile, pidlock.read_pid())) + raise SystemExit(PIDLOCKED.format(pidfile, pidlock.read_pid())) pidlock.acquire() return pidlock -def fileno(f): - if isinstance(f, (int, long)): - return f - return f.fileno() +if hasattr(os, 'closerange'): + def close_open_fds(keep=None): + keep = [maybe_fileno(f) + for f in uniq(sorted(keep or [])) + if maybe_fileno(f) is not None] + maxfd = get_fdmax(default=2048) + kL, kH = iter([-1] + keep), iter(keep + [maxfd]) + for low, high in zip_longest(kL, kH): + if low + 1 != high: + os.closerange(low + 1, high) -def maybe_fileno(f): - """Get object fileno, or :const:`None` if not defined.""" - try: - return fileno(f) - except FILENO_ERRORS: - pass +else: - -def close_open_fds(keep=None): - keep = [maybe_fileno(f) for f in keep if maybe_fileno(f)] if keep else [] - for fd in reversed(range(get_fdmax(default=2048))): - if fd not in keep: - with ignore_errno(errno.EBADF): - os.close(fd) + def close_open_fds(keep=None): # noqa + keep = [maybe_fileno(f) + for f in (keep or []) if maybe_fileno(f) is not None] + for fd in reversed(range(get_fdmax(default=2048))): + if fd not in keep: + with ignore_errno(errno.EBADF): + os.close(fd) class DaemonContext(object): @@ -391,7 +375,7 @@ def parse_uid(uid): """Parse user id. uid can be an integer (uid) or a string (user name), if a user name - the uid is taken from the password file. + the uid is taken from the system user registry. """ try: @@ -400,14 +384,14 @@ def parse_uid(uid): try: return pwd.getpwnam(uid).pw_uid except (AttributeError, KeyError): - raise KeyError('User does not exist: %r' % (uid, )) + raise KeyError('User does not exist: {0}'.format(uid)) def parse_gid(gid): """Parse group id. gid can be an integer (gid) or a string (group name), if a group name - the gid is taken from the password file. + the gid is taken from the system group registry. """ try: @@ -416,7 +400,7 @@ def parse_gid(gid): try: return grp.getgrnam(gid).gr_gid except (AttributeError, KeyError): - raise KeyError('Group does not exist: %r' % (gid, )) + raise KeyError('Group does not exist: {0}'.format(gid)) def _setgroups_hack(groups): @@ -432,7 +416,7 @@ def _setgroups_hack(groups): if len(groups) <= 1: raise groups[:] = groups[:-1] - except OSError, exc: # error from the OS. + except OSError as exc: # error from the OS. if exc.errno != errno.EINVAL or len(groups) <= 1: raise groups[:] = groups[:-1] @@ -447,7 +431,7 @@ def setgroups(groups): pass try: return _setgroups_hack(groups[:max_groups]) - except OSError, exc: + except OSError as exc: if exc.errno != errno.EPERM: raise if any(group not in groups for group in os.getgroups()): @@ -498,11 +482,26 @@ def maybe_drop_privileges(uid=None, gid=None): gid = pwd.getpwuid(uid).pw_gid # Must set the GID before initgroups(), as setgid() # is known to zap the group list on some platforms. + + # setgid must happen before setuid (otherwise the setgid operation + # may fail because of insufficient privileges and possibly stay + # in a privileged group). setgid(gid) initgroups(uid, gid) # at last: setuid(uid) + # ... 
and make sure privileges cannot be restored: + try: + setuid(0) + except OSError as exc: + if get_errno(exc) != errno.EPERM: + raise + pass # Good: cannot restore privileges. + else: + raise RuntimeError( + 'non-root user able to restore privileges after setuid.') + else: gid and setgid(gid) @@ -519,6 +518,7 @@ class Signals(object): >>> from celery.platforms import signals + >>> from proj.handlers import my_handler >>> signals['INT'] = my_handler >>> signals['INT'] @@ -538,6 +538,7 @@ class Signals(object): >>> signals['USR1'] == signals.default True + >>> from proj.handlers import exit_handler, hup_handler >>> signals.update(INT=exit_handler, ... TERM=exit_handler, ... HUP=hup_handler) @@ -547,8 +548,27 @@ class Signals(object): ignored = _signal.SIG_IGN default = _signal.SIG_DFL + if hasattr(_signal, 'setitimer'): + + def arm_alarm(self, seconds): + _signal.setitimer(_signal.ITIMER_REAL, seconds) + else: # pragma: no cover + try: + from itimer import alarm as _itimer_alarm # noqa + except ImportError: + + def arm_alarm(self, seconds): # noqa + _signal.alarm(math.ceil(seconds)) + else: # pragma: no cover + + def arm_alarm(self, seconds): # noqa + return _itimer_alarm(seconds) # noqa + + def reset_alarm(self): + return _signal.alarm(0) + def supported(self, signal_name): - """Returns true value if ``signal_name`` exists on this platform.""" + """Return true value if ``signal_name`` exists on this platform.""" try: return self.signum(signal_name) except AttributeError: @@ -558,7 +578,7 @@ class Signals(object): """Get signal number from signal name.""" if isinstance(signal_name, int): return signal_name - if not isinstance(signal_name, basestring) \ + if not isinstance(signal_name, string_t) \ or not signal_name.isupper(): raise TypeError('signal name must be uppercase string.') if not signal_name.startswith('SIG'): @@ -600,10 +620,9 @@ class Signals(object): def update(self, _d_=None, **sigmap): """Set signal handlers from a mapping.""" - for signal_name, handler in dict(_d_ or {}, **sigmap).iteritems(): + for signal_name, handler in items(dict(_d_ or {}, **sigmap)): self[signal_name] = handler - signals = Signals() get_signal = signals.signum # compat install_signal_handler = signals.__setitem__ # compat @@ -624,8 +643,8 @@ def set_process_title(progname, info=None): Only works if :mod:`setproctitle` is installed. """ - proctitle = '[%s]' % progname - proctitle = '%s %s' % (proctitle, info) if info else proctitle + proctitle = '[{0}]'.format(progname) + proctitle = '{0} {1}'.format(proctitle, info) if info else proctitle if _setproctitle: _setproctitle.setproctitle(safe_str(proctitle)) return proctitle @@ -644,27 +663,14 @@ else: """ if hostname: - progname = '%s@%s' % (progname, hostname.split('.')[0]) + progname = '{0}: {1}'.format(progname, hostname) return set_process_title( - '%s:%s' % (progname, current_process().name), info=info) + '{0}:{1}'.format(progname, current_process().name), info=info) -def shellsplit(s): - """Compat. version of :func:`shlex.split` that supports - the ``posix`` option which was first added in Python 2.6. - - Posix behavior will be disabled if running under Windows. - - """ - lexer = shlex.shlex(s, posix=not IS_WINDOWS) - lexer.whitespace_split = True - lexer.commenters = '' - return list(lexer) - - -def get_errno(n): +def get_errno_name(n): """Get errno for string, e.g. 
``ENOENT``.""" - if isinstance(n, basestring): + if isinstance(n, string_t): return getattr(errno, n) return n @@ -677,8 +683,8 @@ def ignore_errno(*errnos, **kwargs): the name of the code, or the code integer itself:: >>> with ignore_errno('ENOENT'): - ... with open('foo', 'r'): - ... return r.read() + ... with open('foo', 'r') as fh: + ... return fh.read() >>> with ignore_errno(errno.ENOENT, errno.EPERM): ... pass @@ -687,10 +693,10 @@ def ignore_errno(*errnos, **kwargs): defaults to :exc:`Exception`. """ types = kwargs.get('types') or (Exception, ) - errnos = [get_errno(errno) for errno in errnos] + errnos = [get_errno_name(errno) for errno in errnos] try: yield - except types, exc: + except types as exc: if not hasattr(exc, 'errno'): raise if exc.errno not in errnos: diff --git a/awx/lib/site-packages/celery/result.py b/awx/lib/site-packages/celery/result.py index acf1ff6cbe..44ef0522bb 100644 --- a/awx/lib/site-packages/celery/result.py +++ b/awx/lib/site-packages/celery/result.py @@ -7,7 +7,6 @@ """ from __future__ import absolute_import -from __future__ import with_statement import time @@ -19,14 +18,33 @@ from kombu.utils.compat import OrderedDict from . import current_app from . import states +from ._state import task_join_will_block from .app import app_or_default -from .datastructures import DependencyGraph +from .datastructures import DependencyGraph, GraphFormatter from .exceptions import IncompleteStream, TimeoutError +from .five import items, range, string_t, monotonic + +__all__ = ['ResultBase', 'AsyncResult', 'ResultSet', 'GroupResult', + 'EagerResult', 'result_from_tuple'] + +E_WOULDBLOCK = """\ +Never call result.get() within a task! +See http://docs.celeryq.org/en/latest/userguide/tasks.html\ +#task-synchronous-subtasks +""" + + +def assert_will_not_block(): + if task_join_will_block(): + pass # TODO future version: raise class ResultBase(object): """Base class for all results""" + #: Parent result (if part of a chain) + parent = None + class AsyncResult(ResultBase): """Query task state. @@ -46,9 +64,6 @@ class AsyncResult(ResultBase): #: The task result backend to use. backend = None - #: Parent result (if part of a chain) - parent = None - def __init__(self, id, backend=None, task_name=None, app=None, parent=None): self.app = app_or_default(app or self.app) @@ -57,14 +72,17 @@ class AsyncResult(ResultBase): self.task_name = task_name self.parent = parent - def serializable(self): - return self.id, None + def as_tuple(self): + parent = self.parent + return (self.id, parent and parent.as_tuple()), None + serializable = as_tuple # XXX compat def forget(self): """Forget about (and possibly remove the result of) this task.""" self.backend.forget(self.id) - def revoke(self, connection=None, terminate=False, signal=None): + def revoke(self, connection=None, terminate=False, signal=None, + wait=False, timeout=None): """Send revoke signal to all workers. Any worker receiving the task, or having reserved the @@ -74,10 +92,15 @@ class AsyncResult(ResultBase): on the task (if any). :keyword signal: Name of signal to send to process if terminate. Default is TERM. + :keyword wait: Wait for replies from workers. Will wait for 1 second + by default or you can specify a custom ``timeout``. + :keyword timeout: Time in seconds to wait for replies if ``wait`` + enabled. 
""" self.app.control.revoke(self.id, connection=connection, - terminate=terminate, signal=signal) + terminate=terminate, signal=signal, + reply=wait, timeout=timeout) def get(self, timeout=None, propagate=True, interval=0.5): """Wait until task is ready, and return its result. @@ -103,11 +126,22 @@ class AsyncResult(ResultBase): be re-raised. """ + assert_will_not_block() + if propagate and self.parent: + for node in reversed(list(self._parents())): + node.get(propagate=True, timeout=timeout, interval=interval) + return self.backend.wait_for(self.id, timeout=timeout, propagate=propagate, interval=interval) wait = get # deprecated alias to :meth:`get`. + def _parents(self): + node = self.parent + while node: + yield node + node = node.parent + def collect(self, intermediate=False, **kwargs): """Iterator, like :meth:`get` will wait for the task to complete, but will also follow :class:`AsyncResult` and :class:`ResultSet` @@ -119,7 +153,7 @@ class AsyncResult(ResultBase): @task() def A(how_many): - return group(B.s(i) for i in xrange(how_many)) + return group(B.s(i) for i in range(how_many)) @task() def B(i): @@ -133,6 +167,8 @@ class AsyncResult(ResultBase): .. code-block:: python + >>> from proj.tasks import A + >>> result = A.delay(10) >>> list(result.collect()) [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] @@ -176,11 +212,13 @@ class AsyncResult(ResultBase): """Returns :const:`True` if the task failed.""" return self.state == states.FAILURE - def build_graph(self, intermediate=False): - graph = DependencyGraph() + def build_graph(self, intermediate=False, formatter=None): + graph = DependencyGraph( + formatter=formatter or GraphFormatter(root=self.id, shape='oval'), + ) for parent, node in self.iterdeps(intermediate=intermediate): + graph.add_arc(node) if parent: - graph.add_arc(parent) graph.add_edge(parent, node) return graph @@ -193,18 +231,22 @@ class AsyncResult(ResultBase): return hash(self.id) def __repr__(self): - return '<%s: %s>' % (self.__class__.__name__, self.id) + return '<{0}: {1}>'.format(type(self).__name__, self.id) def __eq__(self, other): if isinstance(other, AsyncResult): return other.id == self.id - elif isinstance(other, basestring): + elif isinstance(other, string_t): return other == self.id return NotImplemented + def __ne__(self, other): + return not self.__eq__(other) + def __copy__(self): - r = self.__reduce__() - return r[0](*r[1]) + return self.__class__( + self.id, self.backend, self.task_name, self.app, self.parent, + ) def __reduce__(self): return self.__class__, self.__reduce_args__() @@ -224,7 +266,7 @@ class AsyncResult(ResultBase): def children(self): children = self.backend.get_children(self.id) if children: - return [from_serializable(r, self.app) for r in children] + return [result_from_tuple(child, self.app) for child in children] @property def result(self): @@ -272,12 +314,14 @@ class AsyncResult(ResultBase): return self.backend.get_status(self.id) status = state - def _get_task_id(self): + @property + def task_id(self): + """compat alias to :attr:`id`""" return self.id - def _set_task_id(self, id): + @task_id.setter # noqa + def task_id(self, id): self.id = id - task_id = property(_get_task_id, _set_task_id) BaseAsyncResult = AsyncResult # for backwards compatibility. @@ -306,13 +350,13 @@ class ResultSet(ResultBase): self.results.append(result) def remove(self, result): - """Removes result from the set; it must be a member. + """Remove result from the set; it must be a member. :raises KeyError: if the result is not a member. 
""" - if isinstance(result, basestring): - result = AsyncResult(result) + if isinstance(result, string_t): + result = self.app.AsyncResult(result) try: self.results.remove(result) except ValueError: @@ -387,23 +431,26 @@ class ResultSet(ResultBase): for result in self.results: result.forget() - def revoke(self, connection=None, terminate=False, signal=None): + def revoke(self, connection=None, terminate=False, signal=None, + wait=False, timeout=None): """Send revoke signal to all workers for all tasks in the set. :keyword terminate: Also terminate the process currently working on the task (if any). :keyword signal: Name of signal to send to process if terminate. Default is TERM. + :keyword wait: Wait for replies from worker. Will wait for 1 second + by default or you can specify a custom ``timeout``. + :keyword timeout: Time in seconds to wait for replies if ``wait`` + enabled. """ - with self.app.connection_or_acquire(connection) as conn: - for result in self.results: - result.revoke( - connection=conn, terminate=terminate, signal=signal, - ) + self.app.control.revoke([r.id for r in self.results], + connection=connection, timeout=timeout, + terminate=terminate, signal=signal, reply=wait) def __iter__(self): - return self.iterate() + return iter(self.results) def __getitem__(self, index): """`res[i] -> res.results[i]`""" @@ -422,7 +469,7 @@ class ResultSet(ResultBase): while results: removed = set() - for task_id, result in results.iteritems(): + for task_id, result in items(results): if result.ready(): yield result.get(timeout=timeout and timeout - elapsed, propagate=propagate) @@ -437,7 +484,7 @@ class ResultSet(ResultBase): if timeout and elapsed >= timeout: raise TimeoutError('The operation timed out') - def get(self, timeout=None, propagate=True, interval=0.5): + def get(self, timeout=None, propagate=True, interval=0.5, callback=None): """See :meth:`join` This is here for API compatibility with :class:`AsyncResult`, @@ -446,9 +493,10 @@ class ResultSet(ResultBase): """ return (self.join_native if self.supports_native_join else self.join)( - timeout=timeout, propagate=propagate, interval=interval) + timeout=timeout, propagate=propagate, + interval=interval, callback=callback) - def join(self, timeout=None, propagate=True, interval=0.5): + def join(self, timeout=None, propagate=True, interval=0.5, callback=None): """Gathers the results of all tasks as a list in order. .. note:: @@ -475,27 +523,38 @@ class ResultSet(ResultBase): does not have any effect when using the amqp result store backend, as it does not use polling. + :keyword callback: Optional callback to be called for every result + received. Must have signature ``(task_id, value)`` + No results will be returned by this function if + a callback is specified. The order of results + is also arbitrary when a callback is used. + :raises celery.exceptions.TimeoutError: if `timeout` is not :const:`None` and the operation takes longer than `timeout` seconds. 
""" - time_start = time.time() + assert_will_not_block() + time_start = monotonic() remaining = None results = [] for result in self.results: remaining = None if timeout: - remaining = timeout - (time.time() - time_start) + remaining = timeout - (monotonic() - time_start) if remaining <= 0.0: raise TimeoutError('join operation timed out') - results.append(result.get(timeout=remaining, - propagate=propagate, - interval=interval)) + value = result.get(timeout=remaining, + propagate=propagate, + interval=interval) + if callback: + callback(result.id, value) + else: + results.append(value) return results - def iter_native(self, timeout=None, interval=None): + def iter_native(self, timeout=None, interval=0.5): """Backend optimized version of :meth:`iterate`. .. versionadded:: 2.2 @@ -507,13 +566,15 @@ class ResultSet(ResultBase): result backends. """ - if not self.results: + results = self.results + if not results: return iter([]) - backend = self.results[0].backend - ids = [result.id for result in self.results] - return backend.get_many(ids, timeout=timeout, interval=interval) + return results[0].backend.get_many( + set(r.id for r in results), timeout=timeout, interval=interval, + ) - def join_native(self, timeout=None, propagate=True, interval=0.5): + def join_native(self, timeout=None, propagate=True, + interval=0.5, callback=None): """Backend optimized version of :meth:`join`. .. versionadded:: 2.2 @@ -525,13 +586,19 @@ class ResultSet(ResultBase): result backends. """ - results = self.results - acc = [None for _ in xrange(len(self))] - for task_id, meta in self.iter_native(timeout=timeout, - interval=interval): + assert_will_not_block() + order_index = None if callback else dict( + (result.id, i) for i, result in enumerate(self.results) + ) + acc = None if callback else [None for _ in range(len(self))] + for task_id, meta in self.iter_native(timeout, interval): + value = meta['result'] if propagate and meta['status'] in states.PROPAGATE_STATES: - raise meta['result'] - acc[results.index(task_id)] = meta['result'] + raise value + if callback: + callback(task_id, value) + else: + acc[order_index[task_id]] = value return acc def _failed_join_report(self): @@ -547,9 +614,12 @@ class ResultSet(ResultBase): return other.results == self.results return NotImplemented + def __ne__(self, other): + return not self.__eq__(other) + def __repr__(self): - return '<%s: [%s]>' % (self.__class__.__name__, - ', '.join(r.id for r in self.results)) + return '<{0}: [{1}]>'.format(type(self).__name__, + ', '.join(r.id for r in self.results)) @property def subtasks(self): @@ -590,8 +660,9 @@ class GroupResult(ResultSet): Example:: - >>> result.save() - >>> result = GroupResult.restore(group_id) + >>> def save_and_restore(result): + ... result.save() + ... 
result = GroupResult.restore(result.id) """ return (backend or self.app.backend).save_group(self.id, self) @@ -611,12 +682,16 @@ class GroupResult(ResultSet): return other.id == self.id and other.results == self.results return NotImplemented - def __repr__(self): - return '<%s: %s [%s]>' % (self.__class__.__name__, self.id, - ', '.join(r.id for r in self.results)) + def __ne__(self, other): + return not self.__eq__(other) - def serializable(self): - return self.id, [r.serializable() for r in self.results] + def __repr__(self): + return '<{0}: {1} [{2}]>'.format(type(self).__name__, self.id, + ', '.join(r.id for r in self.results)) + + def as_tuple(self): + return self.id, [r.as_tuple() for r in self.results] + serializable = as_tuple # XXX compat @property def children(self): @@ -625,7 +700,9 @@ class GroupResult(ResultSet): @classmethod def restore(self, id, backend=None): """Restore previously saved group result.""" - return (backend or current_app.backend).restore_group(id) + return ( + backend or (self.app.backend if self.app else current_app.backend) + ).restore_group(id) class TaskSetResult(GroupResult): @@ -647,12 +724,14 @@ class TaskSetResult(GroupResult): """Deprecated: Use ``len(r)``.""" return len(self) - def _get_taskset_id(self): + @property + def taskset_id(self): + """compat alias to :attr:`self.id`""" return self.id - def _set_taskset_id(self, id): + @taskset_id.setter # noqa + def taskset_id(self, id): self.id = id - taskset_id = property(_get_taskset_id, _set_taskset_id) class EagerResult(AsyncResult): @@ -694,7 +773,7 @@ class EagerResult(AsyncResult): self._state = states.REVOKED def __repr__(self): - return '' % self.id + return ''.format(self) @property def result(self): @@ -717,17 +796,21 @@ class EagerResult(AsyncResult): return False -def from_serializable(r, app=None): +def result_from_tuple(r, app=None): # earlier backends may just pickle, so check if # result is already prepared. app = app_or_default(app) Result = app.AsyncResult if not isinstance(r, ResultBase): - if isinstance(r, (list, tuple)): - id, nodes = r - if nodes: - return app.GroupResult(id, [Result(sid) for sid, _ in nodes]) - return Result(id) - else: - return Result(r) + res, nodes = r + if nodes: + return app.GroupResult( + res, [result_from_tuple(child, app) for child in nodes], + ) + # previously did not include parent + id, parent = res if isinstance(res, (list, tuple)) else (res, None) + if parent: + parent = result_from_tuple(parent, app) + return Result(id, parent=parent) return r +from_serializable = result_from_tuple # XXX compat diff --git a/awx/lib/site-packages/celery/schedules.py b/awx/lib/site-packages/celery/schedules.py index cc7353d02c..b511955b5a 100644 --- a/awx/lib/site-packages/celery/schedules.py +++ b/awx/lib/site-packages/celery/schedules.py @@ -11,19 +11,41 @@ from __future__ import absolute_import import re +from collections import namedtuple from datetime import datetime, timedelta -from dateutil.relativedelta import relativedelta from kombu.utils import cached_property from . 
import current_app +from .five import range, string_t from .utils import is_iterable from .utils.timeutils import ( timedelta_seconds, weekday, maybe_timedelta, remaining, - humanize_seconds, timezone, maybe_make_aware + humanize_seconds, timezone, maybe_make_aware, ffwd ) from .datastructures import AttributeDict +__all__ = ['ParseException', 'schedule', 'crontab', 'crontab_parser', + 'maybe_schedule'] + +schedstate = namedtuple('schedstate', ('is_due', 'next')) + + +CRON_PATTERN_INVALID = """\ +Invalid crontab pattern. Valid range is {min}-{max}. \ +'{value}' was found.\ +""" + +CRON_INVALID_TYPE = """\ +Argument cronspec needs to be of any of the following types: \ +int, str, or an iterable type. {type!r} was given.\ +""" + +CRON_REPR = """\ +\ +""" + def cronfield(s): return '*' if s is None else s @@ -34,19 +56,32 @@ class ParseException(Exception): class schedule(object): + """Schedule for periodic task. + + :param run_every: Interval in seconds (or a :class:`~datetime.timedelta`). + :param relative: If set to True the run time will be rounded to the + resolution of the interval. + :param nowfun: Function returning the current date and time + (class:`~datetime.datetime`). + :param app: Celery app instance. + + """ relative = False - def __init__(self, run_every=None, relative=False, nowfun=None): + def __init__(self, run_every=None, relative=False, nowfun=None, app=None): self.run_every = maybe_timedelta(run_every) self.relative = relative self.nowfun = nowfun + self._app = app def now(self): return (self.nowfun or self.app.now)() def remaining_estimate(self, last_run_at): - return remaining(last_run_at, self.run_every, - self.maybe_make_aware(self.now()), self.relative) + return remaining( + self.maybe_make_aware(last_run_at), self.run_every, + self.maybe_make_aware(self.now()), self.relative, + ) def is_due(self, last_run_at): """Returns tuple of two items `(is_due, next_time_to_run)`, @@ -61,8 +96,8 @@ class schedule(object): You can override this to decide the interval at runtime, but keep in mind the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`, - which decides the maximum number of seconds celerybeat can sleep - between re-checking the periodic task intervals. So if you + which decides the maximum number of seconds the Beat scheduler can + sleep between re-checking the periodic task intervals. 
So if you dynamically change the next run at value, and the max interval is set to 5 minutes, it will take 5 minutes for the change to take effect, so you may consider lowering the value of @@ -78,10 +113,10 @@ class schedule(object): """ last_run_at = self.maybe_make_aware(last_run_at) rem_delta = self.remaining_estimate(last_run_at) - rem = timedelta_seconds(rem_delta) - if rem == 0: - return True, self.seconds - return False, rem + remaining_s = timedelta_seconds(rem_delta) + if remaining_s == 0: + return schedstate(is_due=True, next=self.seconds) + return schedstate(is_due=False, next=remaining_s) def maybe_make_aware(self, dt): if self.utc_enabled: @@ -89,13 +124,19 @@ class schedule(object): return dt def __repr__(self): - return '' % self.human_seconds + return ''.format(self) def __eq__(self, other): if isinstance(other, schedule): return self.run_every == other.run_every return self.run_every == other + def __ne__(self, other): + return not self.__eq__(other) + + def __reduce__(self): + return self.__class__, (self.run_every, self.relative, self.nowfun) + @property def seconds(self): return timedelta_seconds(self.run_every) @@ -104,13 +145,17 @@ class schedule(object): def human_seconds(self): return humanize_seconds(self.seconds) - @cached_property + @property def app(self): - return current_app._get_current_object() + return self._app or current_app._get_current_object() + + @app.setter # noqa + def app(self, app): + self._app = app @cached_property def tz(self): - return timezone.get_timezone(self.app.conf.CELERY_TIMEZONE) + return self.app.timezone @cached_property def utc_enabled(self): @@ -118,7 +163,7 @@ class schedule(object): def to_local(self, dt): if not self.utc_enabled: - return timezone.to_local_fallback(dt, self.tz) + return timezone.to_local_fallback(dt) return dt @@ -198,10 +243,9 @@ class crontab_parser(object): if len(toks) > 1: to = self._expand_number(toks[1]) if to < fr: # Wrap around max_ if necessary - return range(fr, - self.min_ + self.max_) + range(self.min_, - to + 1) - return range(fr, to + 1) + return (list(range(fr, self.min_ + self.max_)) + + list(range(self.min_, to + 1))) + return list(range(fr, to + 1)) return [fr] def _range_steps(self, toks): @@ -215,10 +259,10 @@ class crontab_parser(object): return self._expand_star()[::int(toks[0])] def _expand_star(self, *args): - return range(self.min_, self.max_ + self.min_) + return list(range(self.min_, self.max_ + self.min_)) def _expand_number(self, s): - if isinstance(s, basestring) and s[0] == '-': + if isinstance(s, string_t) and s[0] == '-': raise self.ParseException('negative numbers not supported') try: i = int(s) @@ -226,15 +270,16 @@ class crontab_parser(object): try: i = weekday(s) except KeyError: - raise ValueError("Invalid weekday literal '%s'." % s) + raise ValueError('Invalid weekday literal {0!r}.'.format(s)) max_val = self.min_ + self.max_ - 1 if i > max_val: raise ValueError( - 'Invalid end range: %s > %s.' % (i, max_val)) + 'Invalid end range: {0} > {1}.'.format(i, max_val)) if i < self.min_: raise ValueError( - 'Invalid beginning range: %s < %s.' % (i, self.min_)) + 'Invalid beginning range: {0} < {1}.'.format(i, self.min_)) + return i @@ -295,6 +340,15 @@ class crontab(schedule): of every quarter) or `month_of_year='2-12/2'` (for every even numbered month). + .. attribute:: nowfun + + Function returning the current date and time + (:class:`~datetime.datetime`). + + .. attribute:: app + + The Celery app instance. 
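
The parser expansions above now build real lists (under Python 3 ``range`` is lazy), and wrap-around ranges keep working; a quick check of the documented behaviour, assuming the usual mapping where sunday is day 0::

    from celery.schedules import crontab_parser

    # minute field: every quarter of an hour
    assert crontab_parser(60).parse('*/15') == set([0, 15, 30, 45])
    # day_of_week field: 'fri-mon' wraps around the end of the week
    assert crontab_parser(7).parse('fri-mon') == set([5, 6, 0, 1])
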
+ It is important to realize that any day on which execution should occur must be represented by entries in all three of the day and month attributes. For example, if `day_of_week` is 0 and `day_of_month` @@ -305,18 +359,33 @@ class crontab(schedule): """ + def __init__(self, minute='*', hour='*', day_of_week='*', + day_of_month='*', month_of_year='*', nowfun=None, app=None): + self._orig_minute = cronfield(minute) + self._orig_hour = cronfield(hour) + self._orig_day_of_week = cronfield(day_of_week) + self._orig_day_of_month = cronfield(day_of_month) + self._orig_month_of_year = cronfield(month_of_year) + self.hour = self._expand_cronspec(hour, 24) + self.minute = self._expand_cronspec(minute, 60) + self.day_of_week = self._expand_cronspec(day_of_week, 7) + self.day_of_month = self._expand_cronspec(day_of_month, 31, 1) + self.month_of_year = self._expand_cronspec(month_of_year, 12, 1) + self.nowfun = nowfun + self._app = app + @staticmethod def _expand_cronspec(cronspec, max_, min_=0): """Takes the given cronspec argument in one of the forms:: int (like 7) - basestring (like '3-5,*/15', '*', or 'monday') + str (like '3-5,*/15', '*', or 'monday') set (like set([0,15,30,45])) list (like [8-17]) And convert it to an (expanded) set representing all time unit values on which the crontab triggers. Only in case of the base - type being 'basestring', parsing occurs. (It is fast and + type being 'str', parsing occurs. (It is fast and happens only once for each crontab instance, so there is no significant performance overhead involved.) @@ -332,26 +401,20 @@ class crontab(schedule): """ if isinstance(cronspec, int): result = set([cronspec]) - elif isinstance(cronspec, basestring): + elif isinstance(cronspec, string_t): result = crontab_parser(max_, min_).parse(cronspec) elif isinstance(cronspec, set): result = cronspec elif is_iterable(cronspec): result = set(cronspec) else: - raise TypeError( - 'Argument cronspec needs to be of any of the ' - 'following types: int, basestring, or an iterable type. ' - "'%s' was given." % type(cronspec)) + raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec))) # assure the result does not preceed the min or exceed the max for number in result: if number >= max_ + min_ or number < min_: - raise ValueError( - 'Invalid crontab pattern. Valid ' - "range is %d-%d. '%d' was found." 
% ( - min_, max_ - 1 + min_, number)) - + raise ValueError(CRON_PATTERN_INVALID.format( + min=min_, max=max_ - 1 + min_, value=number)) return result def _delta_to_next(self, last_run_at, next_hour, next_minute): @@ -412,39 +475,19 @@ class crontab(schedule): datedata.dom += 1 roll_over() - return relativedelta(year=datedata.year, - month=months_of_year[datedata.moy], - day=days_of_month[datedata.dom], - hour=next_hour, - minute=next_minute, - second=0, - microsecond=0) - - def __init__(self, minute='*', hour='*', day_of_week='*', - day_of_month='*', month_of_year='*', nowfun=None): - self._orig_minute = cronfield(minute) - self._orig_hour = cronfield(hour) - self._orig_day_of_week = cronfield(day_of_week) - self._orig_day_of_month = cronfield(day_of_month) - self._orig_month_of_year = cronfield(month_of_year) - self.hour = self._expand_cronspec(hour, 24) - self.minute = self._expand_cronspec(minute, 60) - self.day_of_week = self._expand_cronspec(day_of_week, 7) - self.day_of_month = self._expand_cronspec(day_of_month, 31, 1) - self.month_of_year = self._expand_cronspec(month_of_year, 12, 1) - self.nowfun = nowfun + return ffwd(year=datedata.year, + month=months_of_year[datedata.moy], + day=days_of_month[datedata.dom], + hour=next_hour, + minute=next_minute, + second=0, + microsecond=0) def now(self): return (self.nowfun or self.app.now)() def __repr__(self): - return '' % ( - self._orig_minute, - self._orig_hour, - self._orig_day_of_week, - self._orig_day_of_month, - self._orig_month_of_year, - ) + return CRON_REPR.format(self) def __reduce__(self): return (self.__class__, (self._orig_minute, @@ -453,8 +496,7 @@ class crontab(schedule): self._orig_day_of_month, self._orig_month_of_year), None) - def remaining_delta(self, last_run_at, tz=None): - """Returns when the periodic task should run next as a timedelta.""" + def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd): tz = tz or self.tz last_run_at = self.maybe_make_aware(last_run_at) now = self.maybe_make_aware(self.now()) @@ -474,9 +516,7 @@ class crontab(schedule): if execute_this_hour: next_minute = min(minute for minute in self.minute if minute > last_run_at.minute) - delta = relativedelta(minute=next_minute, - second=0, - microsecond=0) + delta = ffwd(minute=next_minute, second=0, microsecond=0) else: next_minute = min(self.minute) execute_today = (execute_this_date and @@ -485,10 +525,8 @@ class crontab(schedule): if execute_today: next_hour = min(hour for hour in self.hour if hour > last_run_at.hour) - delta = relativedelta(hour=next_hour, - minute=next_minute, - second=0, - microsecond=0) + delta = ffwd(hour=next_hour, minute=next_minute, + second=0, microsecond=0) else: next_hour = min(self.hour) all_dom_moy = (self._orig_day_of_month == '*' and @@ -498,20 +536,20 @@ class crontab(schedule): if day > dow_num] or self.day_of_week) add_week = next_day == dow_num - delta = relativedelta(weeks=add_week and 1 or 0, - weekday=(next_day - 1) % 7, - hour=next_hour, - minute=next_minute, - second=0, - microsecond=0) + delta = ffwd(weeks=add_week and 1 or 0, + weekday=(next_day - 1) % 7, + hour=next_hour, + minute=next_minute, + second=0, + microsecond=0) else: delta = self._delta_to_next(last_run_at, next_hour, next_minute) return self.to_local(last_run_at), delta, self.to_local(now) - def remaining_estimate(self, last_run_at): + def remaining_estimate(self, last_run_at, ffwd=ffwd): """Returns when the periodic task should run next as a timedelta.""" - return remaining(*self.remaining_delta(last_run_at)) + return 
remaining(*self.remaining_delta(last_run_at, ffwd=ffwd)) def is_due(self, last_run_at): """Returns tuple of two items `(is_due, next_time_to_run)`, @@ -526,7 +564,7 @@ class crontab(schedule): if due: rem_delta = self.remaining_estimate(self.now()) rem = timedelta_seconds(rem_delta) - return due, rem + return schedstate(due, rem) def __eq__(self, other): if isinstance(other, crontab): @@ -535,12 +573,18 @@ class crontab(schedule): other.day_of_week == self.day_of_week and other.hour == self.hour and other.minute == self.minute) - return other is self + return NotImplemented + + def __ne__(self, other): + return not self.__eq__(other) -def maybe_schedule(s, relative=False): - if isinstance(s, int): - s = timedelta(seconds=s) - if isinstance(s, timedelta): - return schedule(s, relative) +def maybe_schedule(s, relative=False, app=None): + if s is not None: + if isinstance(s, int): + s = timedelta(seconds=s) + if isinstance(s, timedelta): + return schedule(s, relative, app=app) + else: + s.app = app return s diff --git a/awx/lib/site-packages/celery/security/__init__.py b/awx/lib/site-packages/celery/security/__init__.py index cb2b4ca47d..352d400cfc 100644 --- a/awx/lib/site-packages/celery/security/__init__.py +++ b/awx/lib/site-packages/celery/security/__init__.py @@ -7,11 +7,11 @@ """ from __future__ import absolute_import -from __future__ import with_statement -from kombu.serialization import registry +from kombu.serialization import ( + registry, disable_insecure_serializers as _disable_insecure_serializers, +) -from celery import current_app from celery.exceptions import ImproperlyConfigured from .serialization import register_auth @@ -33,40 +33,19 @@ configuration settings to use the auth serializer. Please see the configuration reference for more information. """ - -def disable_untrusted_serializers(whitelist=None): - for name in set(registry._decoders) - set(whitelist or []): - registry.disable(name) +__all__ = ['setup_security'] def setup_security(allowed_serializers=None, key=None, cert=None, store=None, - digest='sha1', serializer='json'): - """Setup the message-signing serializer. + digest='sha1', serializer='json', app=None): + """See :meth:`@Celery.setup_security`.""" + if app is None: + from celery import current_app + app = current_app._get_current_object() - Disables untrusted serializers and if configured to use the ``auth`` - serializer will register the auth serializer with the provided settings - into the Kombu serializer registry. + _disable_insecure_serializers(allowed_serializers) - :keyword allowed_serializers: List of serializer names, or content_types - that should be exempt from being disabled. - :keyword key: Name of private key file to use. - Defaults to the :setting:`CELERY_SECURITY_KEY` setting. - :keyword cert: Name of certificate file to use. - Defaults to the :setting:`CELERY_SECURITY_CERTIFICATE` setting. - :keyword store: Directory containing certificates. - Defaults to the :setting:`CELERY_SECURITY_CERT_STORE` setting. - :keyword digest: Digest algorithm used when signing messages. - Default is ``sha1``. - :keyword serializer: Serializer used to encode messages after - they have been signed. See :setting:`CELERY_TASK_SERIALIZER` for - the serializers supported. - Default is ``json``. 
- - """ - - disable_untrusted_serializers(allowed_serializers) - - conf = current_app.conf + conf = app.conf if conf.CELERY_TASK_SERIALIZER != 'auth': return @@ -84,4 +63,9 @@ def setup_security(allowed_serializers=None, key=None, cert=None, store=None, with open(key) as kf: with open(cert) as cf: - register_auth(kf.read(), cf.read(), store) + register_auth(kf.read(), cf.read(), store, digest, serializer) + registry._set_default_serializer('auth') + + +def disable_untrusted_serializers(whitelist=None): + _disable_insecure_serializers(allowed=whitelist) diff --git a/awx/lib/site-packages/celery/security/certificate.py b/awx/lib/site-packages/celery/security/certificate.py index 218f542961..df2387e6f1 100644 --- a/awx/lib/site-packages/celery/security/certificate.py +++ b/awx/lib/site-packages/celery/security/certificate.py @@ -7,22 +7,26 @@ """ from __future__ import absolute_import -from __future__ import with_statement import glob import os +from kombu.utils.encoding import bytes_to_str + from celery.exceptions import SecurityError +from celery.five import values from .utils import crypto, reraise_errors +__all__ = ['Certificate', 'CertStore', 'FSCertStore'] + class Certificate(object): """X.509 certificate.""" def __init__(self, cert): assert crypto is not None - with reraise_errors('Invalid certificate: %r'): + with reraise_errors('Invalid certificate: {0!r}'): self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert) def has_expired(self): @@ -30,21 +34,21 @@ class Certificate(object): return self._cert.has_expired() def get_serial_number(self): - """Returns the certificates serial number.""" + """Return the serial number in the certificate.""" return self._cert.get_serial_number() def get_issuer(self): - """Returns issuer (CA) as a string""" - return ' '.join(x[1] for x in + """Return issuer (CA) as a string""" + return ' '.join(bytes_to_str(x[1]) for x in self._cert.get_issuer().get_components()) def get_id(self): """Serial number/issuer pair uniquely identifies a certificate""" - return '%s %s' % (self.get_issuer(), self.get_serial_number()) + return '{0} {1}'.format(self.get_issuer(), self.get_serial_number()) def verify(self, data, signature, digest): """Verifies the signature for string containing data.""" - with reraise_errors('Bad signature: %r'): + with reraise_errors('Bad signature: {0!r}'): crypto.verify(self._cert, signature, data, digest) @@ -56,7 +60,7 @@ class CertStore(object): def itercerts(self): """an iterator over the certificates""" - for c in self._certs.itervalues(): + for c in values(self._certs): yield c def __getitem__(self, id): @@ -64,11 +68,11 @@ class CertStore(object): try: return self._certs[id] except KeyError: - raise SecurityError('Unknown certificate: %r' % (id, )) + raise SecurityError('Unknown certificate: {0!r}'.format(id)) def add_cert(self, cert): if cert.get_id() in self._certs: - raise SecurityError('Duplicate certificate: %r' % (id, )) + raise SecurityError('Duplicate certificate: {0!r}'.format(id)) self._certs[cert.get_id()] = cert @@ -84,5 +88,5 @@ class FSCertStore(CertStore): cert = Certificate(f.read()) if cert.has_expired(): raise SecurityError( - 'Expired certificate: %r' % (cert.get_id(), )) + 'Expired certificate: {0!r}'.format(cert.get_id())) self.add_cert(cert) diff --git a/awx/lib/site-packages/celery/security/key.py b/awx/lib/site-packages/celery/security/key.py index 528fab9e73..a5c2620427 100644 --- a/awx/lib/site-packages/celery/security/key.py +++ b/awx/lib/site-packages/celery/security/key.py @@ -7,18 +7,21 @@ """ from 
__future__ import absolute_import -from __future__ import with_statement + +from kombu.utils.encoding import ensure_bytes from .utils import crypto, reraise_errors +__all__ = ['PrivateKey'] + class PrivateKey(object): def __init__(self, key): - with reraise_errors('Invalid private key: %r'): + with reraise_errors('Invalid private key: {0!r}'): self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key) def sign(self, data, digest): """sign string containing data.""" - with reraise_errors('Unable to sign data: %r'): - return crypto.sign(self._key, data, digest) + with reraise_errors('Unable to sign data: {0!r}'): + return crypto.sign(self._key, ensure_bytes(data), digest) diff --git a/awx/lib/site-packages/celery/security/serialization.py b/awx/lib/site-packages/celery/security/serialization.py index 4284f47480..0a45b5e978 100644 --- a/awx/lib/site-packages/celery/security/serialization.py +++ b/awx/lib/site-packages/celery/security/serialization.py @@ -7,17 +7,18 @@ """ from __future__ import absolute_import -from __future__ import with_statement import base64 -from kombu.serialization import registry, encode, decode -from kombu.utils.encoding import bytes_to_str, str_to_bytes +from kombu.serialization import registry, dumps, loads +from kombu.utils.encoding import bytes_to_str, str_to_bytes, ensure_bytes from .certificate import Certificate, FSCertStore from .key import PrivateKey from .utils import reraise_errors +__all__ = ['SecureSerializer', 'register_auth'] + def b64encode(s): return bytes_to_str(base64.b64encode(str_to_bytes(s))) @@ -41,13 +42,14 @@ class SecureSerializer(object): """serialize data structure into string""" assert self._key is not None assert self._cert is not None - with reraise_errors('Unable to serialize: %r', (Exception, )): - content_type, content_encoding, body = encode( + with reraise_errors('Unable to serialize: {0!r}', (Exception, )): + content_type, content_encoding, body = dumps( data, serializer=self._serializer) # What we sign is the serialized body, not the body itself. # this way the receiver doesn't have to decode the contents # to verify the signature (and thus avoiding potential flaws # in the decoding step). 
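# The framing used by _pack()/_unpack() below, sketched without any real
# crypto: everything is base64 over SEP-joined byte fields, and the
# signature is recovered by *length* (pubkey.bits() >> 3), so a signature
# that happens to contain SEP cannot corrupt the split.
#
#     import base64
#     SEP = b'\x00\x01'
#     sig = b'S' * (2048 >> 3)            # fake 2048-bit RSA signature
#     raw = SEP.join([b'issuer 42', sig,  # hypothetical signer id
#                     b'application/json', b'utf-8', b'{"x": 1}'])
#     first = base64.b64decode(base64.b64encode(raw)).find(SEP)
#     start = first + len(SEP)
#     assert raw[start:start + len(sig)] == sig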
+ body = ensure_bytes(body) return self._pack(body, content_type, content_encoding, signature=self._key.sign(body, self._digest), signer=self._cert.get_id()) @@ -55,24 +57,48 @@ class SecureSerializer(object): def deserialize(self, data): """deserialize data structure from string""" assert self._cert_store is not None - with reraise_errors('Unable to deserialize: %r', (Exception, )): + with reraise_errors('Unable to deserialize: {0!r}', (Exception, )): payload = self._unpack(data) signature, signer, body = (payload['signature'], payload['signer'], payload['body']) self._cert_store[signer].verify(body, signature, self._digest) - return decode(body, payload['content_type'], - payload['content_encoding'], force=True) + return loads(bytes_to_str(body), payload['content_type'], + payload['content_encoding'], force=True) def _pack(self, body, content_type, content_encoding, signer, signature, - sep='\x00\x01'): - return b64encode(sep.join([signer, signature, - content_type, content_encoding, body])) + sep=str_to_bytes('\x00\x01')): + fields = sep.join( + ensure_bytes(s) for s in [signer, signature, content_type, + content_encoding, body] + ) + return b64encode(fields) - def _unpack(self, payload, sep='\x00\x01', - fields=('signer', 'signature', 'content_type', - 'content_encoding', 'body')): - return dict(zip(fields, b64decode(payload).split(sep))) + def _unpack(self, payload, sep=str_to_bytes('\x00\x01')): + raw_payload = b64decode(ensure_bytes(payload)) + first_sep = raw_payload.find(sep) + + signer = raw_payload[:first_sep] + signer_cert = self._cert_store[signer] + + sig_len = signer_cert._cert.get_pubkey().bits() >> 3 + signature = raw_payload[ + first_sep + len(sep):first_sep + len(sep) + sig_len + ] + end_of_sig = first_sep + len(sep) + sig_len+len(sep) + + v = raw_payload[end_of_sig:].split(sep) + + values = [bytes_to_str(signer), bytes_to_str(signature), + bytes_to_str(v[0]), bytes_to_str(v[1]), bytes_to_str(v[2])] + + return { + 'signer': values[0], + 'signature': values[1], + 'content_type': values[2], + 'content_encoding': values[3], + 'body': values[4], + } def register_auth(key=None, cert=None, store=None, digest='sha1', diff --git a/awx/lib/site-packages/celery/security/utils.py b/awx/lib/site-packages/celery/security/utils.py index 37b2286a78..d184d0b4c9 100644 --- a/awx/lib/site-packages/celery/security/utils.py +++ b/awx/lib/site-packages/celery/security/utils.py @@ -13,18 +13,23 @@ import sys from contextlib import contextmanager from celery.exceptions import SecurityError +from celery.five import reraise try: from OpenSSL import crypto except ImportError: # pragma: no cover crypto = None # noqa +__all__ = ['reraise_errors'] + @contextmanager -def reraise_errors(msg='%r', errors=None): +def reraise_errors(msg='{0!r}', errors=None): assert crypto is not None errors = (crypto.Error, ) if errors is None else errors try: yield - except errors, exc: - raise SecurityError, SecurityError(msg % (exc, )), sys.exc_info()[2] + except errors as exc: + reraise(SecurityError, + SecurityError(msg.format(exc)), + sys.exc_info()[2]) diff --git a/awx/lib/site-packages/celery/signals.py b/awx/lib/site-packages/celery/signals.py index 3e34be0555..6eae2febff 100644 --- a/awx/lib/site-packages/celery/signals.py +++ b/awx/lib/site-packages/celery/signals.py @@ -15,33 +15,61 @@ from __future__ import absolute_import from .utils.dispatch import Signal +__all__ = ['before_task_publish', 'after_task_publish', + 'task_prerun', 'task_postrun', 'task_success', + 'task_retry', 'task_failure', 
'task_revoked', 'celeryd_init', + 'celeryd_after_setup', 'worker_init', 'worker_process_init', + 'worker_ready', 'worker_shutdown', 'setup_logging', + 'after_setup_logger', 'after_setup_task_logger', + 'beat_init', 'beat_embedded_init', 'eventlet_pool_started', + 'eventlet_pool_preshutdown', 'eventlet_pool_postshutdown', + 'eventlet_pool_apply'] + +before_task_publish = Signal(providing_args=[ + 'body', 'exchange', 'routing_key', 'headers', 'properties', + 'declare', 'retry_policy', +]) +after_task_publish = Signal(providing_args=[ + 'body', 'exchange', 'routing_key', +]) +#: Deprecated, use after_task_publish instead. task_sent = Signal(providing_args=[ - 'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset']) + 'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset', +]) task_prerun = Signal(providing_args=['task_id', 'task', 'args', 'kwargs']) task_postrun = Signal(providing_args=[ - 'task_id', 'task', 'args', 'kwargs', 'retval']) + 'task_id', 'task', 'args', 'kwargs', 'retval', +]) task_success = Signal(providing_args=['result']) task_retry = Signal(providing_args=[ 'request', 'reason', 'einfo', ]) task_failure = Signal(providing_args=[ - 'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo']) -task_revoked = Signal(providing_args=['terminated', 'signum', 'expired']) -celeryd_init = Signal(providing_args=['instance', 'conf']) + 'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo', +]) +task_revoked = Signal(providing_args=[ + 'request', 'terminated', 'signum', 'expired', +]) +celeryd_init = Signal(providing_args=['instance', 'conf', 'options']) celeryd_after_setup = Signal(providing_args=['instance', 'conf']) worker_init = Signal(providing_args=[]) worker_process_init = Signal(providing_args=[]) +worker_process_shutdown = Signal(providing_args=[]) worker_ready = Signal(providing_args=[]) worker_shutdown = Signal(providing_args=[]) setup_logging = Signal(providing_args=[ - 'loglevel', 'logfile', 'format', 'colorize']) + 'loglevel', 'logfile', 'format', 'colorize', +]) after_setup_logger = Signal(providing_args=[ - 'logger', 'loglevel', 'logfile', 'format', 'colorize']) + 'logger', 'loglevel', 'logfile', 'format', 'colorize', +]) after_setup_task_logger = Signal(providing_args=[ - 'logger', 'loglevel', 'logfile', 'format', 'colorize']) + 'logger', 'loglevel', 'logfile', 'format', 'colorize', +]) beat_init = Signal(providing_args=[]) beat_embedded_init = Signal(providing_args=[]) eventlet_pool_started = Signal(providing_args=[]) eventlet_pool_preshutdown = Signal(providing_args=[]) eventlet_pool_postshutdown = Signal(providing_args=[]) eventlet_pool_apply = Signal(providing_args=['target', 'args', 'kwargs']) +user_preload_options = Signal(providing_args=['app', 'options']) diff --git a/awx/lib/site-packages/celery/states.py b/awx/lib/site-packages/celery/states.py index 925953ac10..1665cefa53 100644 --- a/awx/lib/site-packages/celery/states.py +++ b/awx/lib/site-packages/celery/states.py @@ -59,6 +59,10 @@ Misc. """ from __future__ import absolute_import +__all__ = ['PENDING', 'RECEIVED', 'STARTED', 'SUCCESS', 'FAILURE', + 'REVOKED', 'RETRY', 'IGNORED', 'READY_STATES', 'UNREADY_STATES', + 'EXCEPTION_STATES', 'PROPAGATE_STATES', 'precedence', 'state'] + #: State precedence. #: None represents the precedence of an unknown state. #: Lower index means higher precedence. @@ -123,14 +127,22 @@ class state(str): def __le__(self, other): return self.compare(other, lambda a, b: a >= b) +#: Task state is unknown (assumed pending since you know the id). 
PENDING = 'PENDING' +#: Task was received by a worker. RECEIVED = 'RECEIVED' +#: Task was started by a worker (:setting:`CELERY_TRACK_STARTED`). STARTED = 'STARTED' +#: Task succeeded SUCCESS = 'SUCCESS' +#: Task failed FAILURE = 'FAILURE' +#: Task was revoked. REVOKED = 'REVOKED' +#: Task is waiting for retry. RETRY = 'RETRY' IGNORED = 'IGNORED' +REJECTED = 'REJECTED' READY_STATES = frozenset([SUCCESS, FAILURE, REVOKED]) UNREADY_STATES = frozenset([PENDING, RECEIVED, STARTED, RETRY]) diff --git a/awx/lib/site-packages/celery/task/__init__.py b/awx/lib/site-packages/celery/task/__init__.py index d2a04118b8..f8326e887c 100644 --- a/awx/lib/site-packages/celery/task/__init__.py +++ b/awx/lib/site-packages/celery/task/__init__.py @@ -12,7 +12,7 @@ from __future__ import absolute_import from celery._state import current_app, current_task as current -from celery.__compat__ import MagicModule, recreate_module +from celery.five import MagicModule, recreate_module from celery.local import Proxy __all__ = [ @@ -23,7 +23,7 @@ __all__ = [ STATICA_HACK = True globals()['kcah_acitats'[::-1].upper()] = False -if STATICA_HACK: +if STATICA_HACK: # pragma: no cover # This is never executed, but tricks static analyzers (PyDev, PyCharm, # pylint, etc.) into knowing the types of these symbols, and what # they contain. diff --git a/awx/lib/site-packages/celery/task/base.py b/awx/lib/site-packages/celery/task/base.py index 85e6a39f1a..f223fbbdd0 100644 --- a/awx/lib/site-packages/celery/task/base.py +++ b/awx/lib/site-packages/celery/task/base.py @@ -14,15 +14,17 @@ from __future__ import absolute_import from kombu import Exchange from celery import current_app -from celery.__compat__ import class_property, reclassmethod from celery.app.task import Context, TaskType, Task as BaseTask # noqa +from celery.five import class_property, reclassmethod from celery.schedules import maybe_schedule from celery.utils.log import get_task_logger +__all__ = ['Task', 'PeriodicTask', 'task'] + #: list of methods that must be classmethods in the old API. _COMPAT_CLASSMETHODS = ( 'delay', 'apply_async', 'retry', 'apply', 'subtask_from_request', - 'AsyncResult', 'subtask', '_get_request', + 'AsyncResult', 'subtask', '_get_request', '_get_exec_options', ) @@ -47,14 +49,12 @@ class Task(BaseTask): immediate = False # XXX deprecated priority = None type = 'regular' - error_whitelist = () disable_error_emails = False accept_magic_kwargs = False from_config = BaseTask.from_config + ( ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'), ('delivery_mode', 'CELERY_DEFAULT_DELIVERY_MODE'), - ('error_whitelist', 'CELERY_TASK_ERROR_WHITELIST'), ) # In old Celery the @task decorator didn't exist, so one would create @@ -66,7 +66,6 @@ class Task(BaseTask): locals()[name] = reclassmethod(getattr(BaseTask, name)) @class_property - @classmethod def request(cls): return cls._get_request() @@ -75,7 +74,7 @@ class Task(BaseTask): return get_task_logger(self.name) @classmethod - def establish_connection(self, connect_timeout=None): + def establish_connection(self): """Deprecated method used to get a broker connection. Should be replaced with :meth:`@Celery.connection` @@ -91,11 +90,10 @@ class Task(BaseTask): with celery.connection() as conn: ... 
""" - return self._get_app().connection( - connect_timeout=connect_timeout) + return self._get_app().connection() def get_publisher(self, connection=None, exchange=None, - connect_timeout=None, exchange_type=None, **options): + exchange_type=None, **options): """Deprecated method to get the task publisher (now called producer). Should be replaced with :class:`@amqp.TaskProducer`: @@ -110,7 +108,7 @@ class Task(BaseTask): exchange = self.exchange if exchange is None else exchange if exchange_type is None: exchange_type = self.exchange_type - connection = connection or self.establish_connection(connect_timeout) + connection = connection or self.establish_connection() return self._get_app().amqp.TaskProducer( connection, exchange=exchange and Exchange(exchange, exchange_type), @@ -161,68 +159,11 @@ class PeriodicTask(Task): def task(*args, **kwargs): - """Decorator to create a task class out of any callable. - - **Examples** - - .. code-block:: python - - @task() - def refresh_feed(url): - return Feed.objects.get(url=url).refresh() - - With setting extra options and using retry. - - .. code-block:: python - - @task(max_retries=10) - def refresh_feed(url): - try: - return Feed.objects.get(url=url).refresh() - except socket.error, exc: - refresh_feed.retry(exc=exc) - - Calling the resulting task: - - >>> refresh_feed('http://example.com/rss') # Regular - - >>> refresh_feed.delay('http://example.com/rss') # Async - - """ + """Deprecated decorators, please use :meth:`~@task`.""" return current_app.task(*args, **dict({'accept_magic_kwargs': False, 'base': Task}, **kwargs)) def periodic_task(*args, **options): - """Decorator to create a task class out of any callable. - - .. admonition:: Examples - - .. code-block:: python - - @task() - def refresh_feed(url): - return Feed.objects.get(url=url).refresh() - - With setting extra options and using retry. - - .. 
code-block:: python - - from celery.task import current - - @task(exchange='feeds') - def refresh_feed(url): - try: - return Feed.objects.get(url=url).refresh() - except socket.error, exc: - current.retry(exc=exc) - - Calling the resulting task: - - >>> refresh_feed('http://example.com/rss') # Regular - - >>> refresh_feed.delay('http://example.com/rss') # Async - - - """ + """Deprecated decorator, please use :setting:`CELERYBEAT_SCHEDULE`.""" return task(**dict({'base': PeriodicTask}, **options)) diff --git a/awx/lib/site-packages/celery/task/http.py b/awx/lib/site-packages/celery/task/http.py index c9f776aff9..152bfff1cb 100644 --- a/awx/lib/site-packages/celery/task/http.py +++ b/awx/lib/site-packages/celery/task/http.py @@ -10,23 +10,46 @@ from __future__ import absolute_import import anyjson import sys -import urllib2 -from urllib import urlencode -from urlparse import urlparse try: - from urlparse import parse_qsl + from urllib.parse import parse_qsl, urlencode, urlparse # Py3 except ImportError: # pragma: no cover - from cgi import parse_qsl # noqa + from urllib import urlencode # noqa + from urlparse import urlparse, parse_qsl # noqa -from celery import __version__ as celery_version +from celery import shared_task, __version__ as celery_version +from celery.five import items, reraise from celery.utils.log import get_task_logger -from .base import Task as BaseTask + +__all__ = ['InvalidResponseError', 'RemoteExecuteError', 'UnknownStatusError', + 'HttpDispatch', 'dispatch', 'URL'] GET_METHODS = frozenset(['GET', 'HEAD']) logger = get_task_logger(__name__) +if sys.version_info[0] == 3: # pragma: no cover + + from urllib.request import Request, urlopen + + def utf8dict(tup): + if not isinstance(tup, dict): + return dict(tup) + return tup + +else: + + from urllib2 import Request, urlopen # noqa + + def utf8dict(tup): # noqa + """With a dict's items() tuple return a new dict with any utf-8 + keys/values encoded.""" + return dict( + (k.encode('utf-8'), + v.encode('utf-8') if isinstance(v, unicode) else v) + for k, v in tup) + + class InvalidResponseError(Exception): """The remote server gave an invalid response.""" @@ -39,37 +62,15 @@ class UnknownStatusError(InvalidResponseError): """The remote server gave an unknown status.""" -def maybe_utf8(value): - """Encode to utf-8, only if the value is Unicode.""" - if isinstance(value, unicode): - return value.encode('utf-8') - return value - - -if sys.version_info[0] == 3: # pragma: no cover - - def utf8dict(tup): - if not isinstance(tup, dict): - return dict(tup) - return tup -else: - - def utf8dict(tup): # noqa - """With a dict's items() tuple return a new dict with any utf-8 - keys/values encoded.""" - return dict((key.encode('utf-8'), maybe_utf8(value)) - for key, value in tup) - - def extract_response(raw_response, loads=anyjson.loads): """Extract the response text from a raw JSON response.""" if not raw_response: raise InvalidResponseError('Empty response') try: payload = loads(raw_response) - except ValueError, exc: - raise InvalidResponseError, InvalidResponseError( - str(exc)), sys.exc_info()[2] + except ValueError as exc: + reraise(InvalidResponseError, InvalidResponseError( + str(exc)), sys.exc_info()[2]) status = payload['status'] if status == 'success': @@ -106,15 +107,15 @@ class MutableURL(object): def __str__(self): scheme, netloc, path, params, query, fragment = self.parts - query = urlencode(utf8dict(self.query.items())) + query = urlencode(utf8dict(items(self.query))) components = [scheme + '://', netloc, path or '/', - ';%s' 
% params if params else '', - '?%s' % query if query else '', - '#%s' % fragment if fragment else ''] + ';{0}'.format(params) if params else '', + '?{0}'.format(query) if query else '', + '#{0}'.format(fragment) if fragment else ''] return ''.join(c for c in components if c) def __repr__(self): - return '<%s: %s>' % (self.__class__.__name__, str(self)) + return '<{0}: {1}>'.format(type(self).__name__, self) class HttpDispatch(object): @@ -127,21 +128,21 @@ class HttpDispatch(object): :param logger: Logger used for user/system feedback. """ - user_agent = 'celery/%s' % celery_version + user_agent = 'celery/{version}'.format(version=celery_version) timeout = 5 def __init__(self, url, method, task_kwargs, **kwargs): self.url = url self.method = method self.task_kwargs = task_kwargs - self.logger = kwargs.get("logger") or logger + self.logger = kwargs.get('logger') or logger def make_request(self, url, method, params): - """Makes an HTTP request and returns the response.""" - request = urllib2.Request(url, params) - for key, val in self.http_headers.items(): + """Perform HTTP request and return the response.""" + request = Request(url, params) + for key, val in items(self.http_headers): request.add_header(key, val) - response = urllib2.urlopen(request) # user catches errors. + response = urlopen(request) # user catches errors. return response.read() def dispatch(self): @@ -151,7 +152,7 @@ class HttpDispatch(object): if self.method in GET_METHODS: url.query.update(self.task_kwargs) else: - params = urlencode(utf8dict(self.task_kwargs.items())) + params = urlencode(utf8dict(items(self.task_kwargs))) raw_response = self.make_request(str(url), self.method, params) return extract_response(raw_response) @@ -161,7 +162,9 @@ class HttpDispatch(object): return headers -class HttpDispatchTask(BaseTask): +@shared_task(name='celery.http_dispatch', bind=True, + url=None, method=None, accept_magic_kwargs=False) +def dispatch(self, url=None, method='GET', **kwargs): """Task dispatching to an URL. :keyword url: The URL location of the HTTP callback task. @@ -182,15 +185,9 @@ class HttpDispatchTask(BaseTask): argument, as this attribute is intended for subclasses. """ - - url = None - method = None - accept_magic_kwargs = False - - def run(self, url=None, method='GET', **kwargs): - url = url or self.url - method = method or self.method - return HttpDispatch(url, method, kwargs).dispatch() + return HttpDispatch( + url or self.url, method or self.method, kwargs, + ).dispatch() class URL(MutableURL): @@ -200,14 +197,21 @@ class URL(MutableURL): :param url: URL to request. :keyword dispatcher: Class used to dispatch the request. - By default this is :class:`HttpDispatchTask`. + By default this is :func:`dispatch`. 
""" - dispatcher = HttpDispatchTask + dispatcher = None - def __init__(self, url, dispatcher=None): + def __init__(self, url, dispatcher=None, app=None): super(URL, self).__init__(url) + self.app = app self.dispatcher = dispatcher or self.dispatcher + if self.dispatcher is None: + # Get default dispatcher + self.dispatcher = ( + self.app.tasks['celery.http_dispatch'] if self.app + else dispatch + ) def get_async(self, **kwargs): return self.dispatcher.delay(str(self), 'GET', **kwargs) diff --git a/awx/lib/site-packages/celery/task/sets.py b/awx/lib/site-packages/celery/task/sets.py index 33630f466e..e277b796dc 100644 --- a/awx/lib/site-packages/celery/task/sets.py +++ b/awx/lib/site-packages/celery/task/sets.py @@ -8,12 +8,19 @@ """ from __future__ import absolute_import -from __future__ import with_statement from celery._state import get_current_worker_task from celery.app import app_or_default -from celery.canvas import subtask, maybe_subtask # noqa -from celery.utils import uuid +from celery.canvas import maybe_signature # noqa +from celery.utils import uuid, warn_deprecated + +from celery.canvas import subtask # noqa + +warn_deprecated( + 'celery.task.sets and TaskSet', removal='4.0', + alternative="""\ +Please use "group" instead (see the Canvas section in the userguide)\ +""") class TaskSet(list): @@ -24,6 +31,8 @@ class TaskSet(list): Example:: + >>> from myproj.tasks import refresh_feed + >>> urls = ('http://cnn.com/rss', 'http://bbc.co.uk/rss') >>> s = TaskSet(refresh_feed.s(url) for url in urls) >>> taskset_result = s.apply_async() @@ -33,20 +42,21 @@ class TaskSet(list): app = None def __init__(self, tasks=None, app=None, Publisher=None): - super(TaskSet, self).__init__(maybe_subtask(t) for t in tasks or []) self.app = app_or_default(app or self.app) + super(TaskSet, self).__init__( + maybe_signature(t, app=self.app) for t in tasks or [] + ) self.Publisher = Publisher or self.app.amqp.TaskProducer self.total = len(self) # XXX compat - def apply_async(self, connection=None, connect_timeout=None, - publisher=None, taskset_id=None): + def apply_async(self, connection=None, publisher=None, taskset_id=None): """Apply TaskSet.""" app = self.app if app.conf.CELERY_ALWAYS_EAGER: return self.apply(taskset_id=taskset_id) - with app.connection_or_acquire(connection, connect_timeout) as conn: + with app.connection_or_acquire(connection) as conn: setid = taskset_id or uuid() pub = publisher or self.Publisher(conn) results = self._async_results(setid, pub) @@ -54,7 +64,7 @@ class TaskSet(list): result = app.TaskSetResult(setid, results) parent = get_current_worker_task() if parent: - parent.request.children.append(result) + parent.add_trail(result) return result def _async_results(self, taskset_id, publisher): @@ -69,9 +79,10 @@ class TaskSet(list): def _sync_results(self, taskset_id): return [task.apply(taskset_id=taskset_id) for task in self] - def _get_tasks(self): + @property + def tasks(self): return self - def _set_tasks(self, tasks): + @tasks.setter # noqa + def tasks(self, tasks): self[:] = tasks - tasks = property(_get_tasks, _set_tasks) diff --git a/awx/lib/site-packages/celery/task/trace.py b/awx/lib/site-packages/celery/task/trace.py index 6a2a3bef88..5e5f5a8e9e 100644 --- a/awx/lib/site-packages/celery/task/trace.py +++ b/awx/lib/site-packages/celery/task/trace.py @@ -1,423 +1,12 @@ -# -*- coding: utf-8 -*- -""" - celery.task.trace - ~~~~~~~~~~~~~~~~~~~~ - - This module defines how the task execution is traced: - errors are recorded, handlers are applied and so on. 
- -""" +"""This module has moved to celery.app.trace.""" from __future__ import absolute_import -# ## --- -# This is the heart of the worker, the inner loop so to speak. -# It used to be split up into nice little classes and methods, -# but in the end it only resulted in bad performance and horrible tracebacks, -# so instead we now use one closure per task class. - -import os -import socket import sys -from warnings import warn +from celery.utils import warn_deprecated -from kombu.utils import kwdict +warn_deprecated('celery.task.trace', removal='3.2', + alternative='Please use celery.app.trace instead.') -from celery import current_app -from celery import states, signals -from celery._state import _task_stack -from celery.app import set_default_app -from celery.app.task import Task as BaseTask, Context -from celery.datastructures import ExceptionInfo -from celery.exceptions import Ignore, RetryTaskError -from celery.utils.serialization import ( - get_pickleable_exception, - get_pickleable_etype, -) -from celery.utils.log import get_logger - -_logger = get_logger(__name__) - -send_prerun = signals.task_prerun.send -prerun_receivers = signals.task_prerun.receivers -send_postrun = signals.task_postrun.send -postrun_receivers = signals.task_postrun.receivers -send_success = signals.task_success.send -success_receivers = signals.task_success.receivers -STARTED = states.STARTED -SUCCESS = states.SUCCESS -IGNORED = states.IGNORED -RETRY = states.RETRY -FAILURE = states.FAILURE -EXCEPTION_STATES = states.EXCEPTION_STATES -IGNORE_STATES = frozenset([IGNORED, RETRY]) - -#: set by :func:`setup_worker_optimizations` -_tasks = None -_patched = {} - - -def mro_lookup(cls, attr, stop=(), monkey_patched=[]): - """Returns the first node by MRO order that defines an attribute. - - :keyword stop: A list of types that if reached will stop the search. - :keyword monkey_patched: Use one of the stop classes if the attr's - module origin is not in this list, this to detect monkey patched - attributes. - - :returns None: if the attribute was not found. - - """ - for node in cls.mro(): - if node in stop: - try: - attr = node.__dict__[attr] - module_origin = attr.__module__ - except (AttributeError, KeyError): - pass - else: - if module_origin not in monkey_patched: - return node - return - if attr in node.__dict__: - return node - - -def task_has_custom(task, attr): - """Returns true if the task or one of its bases - defines ``attr`` (excluding the one in BaseTask).""" - return mro_lookup(task.__class__, attr, stop=(BaseTask, object), - monkey_patched=['celery.app.task']) - - -class TraceInfo(object): - __slots__ = ('state', 'retval') - - def __init__(self, state, retval=None): - self.state = state - self.retval = retval - - def handle_error_state(self, task, eager=False): - store_errors = not eager - if task.ignore_result: - store_errors = task.store_errors_even_if_ignored - - return { - RETRY: self.handle_retry, - FAILURE: self.handle_failure, - }[self.state](task, store_errors=store_errors) - - def handle_retry(self, task, store_errors=True): - """Handle retry exception.""" - # the exception raised is the RetryTaskError semi-predicate, - # and it's exc' attribute is the original exception raised (if any). 
- req = task.request - type_, _, tb = sys.exc_info() - try: - reason = self.retval - einfo = ExceptionInfo((type_, reason, tb)) - if store_errors: - task.backend.mark_as_retry(req.id, reason.exc, einfo.traceback) - task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo) - signals.task_retry.send(sender=task, request=req, - reason=reason, einfo=einfo) - return einfo - finally: - del(tb) - - def handle_failure(self, task, store_errors=True): - """Handle exception.""" - req = task.request - type_, _, tb = sys.exc_info() - try: - exc = self.retval - einfo = ExceptionInfo() - einfo.exception = get_pickleable_exception(einfo.exception) - einfo.type = get_pickleable_etype(einfo.type) - if store_errors: - task.backend.mark_as_failure(req.id, exc, einfo.traceback) - task.on_failure(exc, req.id, req.args, req.kwargs, einfo) - signals.task_failure.send(sender=task, task_id=req.id, - exception=exc, args=req.args, - kwargs=req.kwargs, - traceback=tb, - einfo=einfo) - return einfo - finally: - del(tb) - - -def build_tracer(name, task, loader=None, hostname=None, store_errors=True, - Info=TraceInfo, eager=False, propagate=False, - IGNORE_STATES=IGNORE_STATES): - """Builts a function that tracing the tasks execution; catches all - exceptions, and saves the state and result of the task execution - to the result backend. - - If the call was successful, it saves the result to the task result - backend, and sets the task status to `"SUCCESS"`. - - If the call raises :exc:`~celery.exceptions.RetryTaskError`, it extracts - the original exception, uses that as the result and sets the task status - to `"RETRY"`. - - If the call results in an exception, it saves the exception as the task - result, and sets the task status to `"FAILURE"`. - - Returns a function that takes the following arguments: - - :param uuid: The unique id of the task. - :param args: List of positional args to pass on to the function. - :param kwargs: Keyword arguments mapping to pass on to the function. - :keyword request: Request dict. - - """ - # If the task doesn't define a custom __call__ method - # we optimize it away by simply calling the run method directly, - # saving the extra method call and a line less in the stack trace. 
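# --- illustrative sketch (not from the patched sources) ------------------
# The shortcut below is skipped as soon as any subclass under BaseTask
# overrides __call__, e.g. (``MyTask`` is an invented example):
#
#     class MyTask(BaseTask):
#         def __call__(self, *args, **kwargs):   # custom -> no shortcut
#             return super(MyTask, self).__call__(*args, **kwargs)
# --------------------------------------------------------------------------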
- fun = task if task_has_custom(task, '__call__') else task.run - - loader = loader or current_app.loader - backend = task.backend - ignore_result = task.ignore_result - track_started = task.track_started - track_started = not eager and (task.track_started and not ignore_result) - publish_result = not eager and not ignore_result - hostname = hostname or socket.gethostname() - - loader_task_init = loader.on_task_init - loader_cleanup = loader.on_process_cleanup - - task_on_success = None - task_after_return = None - if task_has_custom(task, 'on_success'): - task_on_success = task.on_success - if task_has_custom(task, 'after_return'): - task_after_return = task.after_return - - store_result = backend.store_result - backend_cleanup = backend.process_cleanup - - pid = os.getpid() - - request_stack = task.request_stack - push_request = request_stack.push - pop_request = request_stack.pop - push_task = _task_stack.push - pop_task = _task_stack.pop - on_chord_part_return = backend.on_chord_part_return - - from celery import canvas - subtask = canvas.subtask - - def trace_task(uuid, args, kwargs, request=None): - R = I = None - kwargs = kwdict(kwargs) - try: - push_task(task) - task_request = Context(request or {}, args=args, - called_directly=False, kwargs=kwargs) - push_request(task_request) - try: - # -*- PRE -*- - if prerun_receivers: - send_prerun(sender=task, task_id=uuid, task=task, - args=args, kwargs=kwargs) - loader_task_init(uuid, task) - if track_started: - store_result(uuid, {'pid': pid, - 'hostname': hostname}, STARTED) - - # -*- TRACE -*- - try: - R = retval = fun(*args, **kwargs) - state = SUCCESS - except Ignore, exc: - I, R = Info(IGNORED, exc), ExceptionInfo(internal=True) - state, retval = I.state, I.retval - except RetryTaskError, exc: - I = Info(RETRY, exc) - state, retval = I.state, I.retval - R = I.handle_error_state(task, eager=eager) - except Exception, exc: - if propagate: - raise - I = Info(FAILURE, exc) - state, retval = I.state, I.retval - R = I.handle_error_state(task, eager=eager) - [subtask(errback).apply_async((uuid, )) - for errback in task_request.errbacks or []] - except BaseException, exc: - raise - except: # pragma: no cover - # For Python2.5 where raising strings are still allowed - # (but deprecated) - if propagate: - raise - I = Info(FAILURE, None) - state, retval = I.state, I.retval - R = I.handle_error_state(task, eager=eager) - [subtask(errback).apply_async((uuid, )) - for errback in task_request.errbacks or []] - else: - # callback tasks must be applied before the result is - # stored, so that result.children is populated. 
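# --- illustrative sketch (not from the patched sources) ------------------
# The callbacks applied below are the ones attached at call time; e.g.
# (``add``, ``log_result`` and ``on_error`` are hypothetical tasks):
#
#     add.apply_async((2, 2), link=log_result.s(), link_error=on_error.s())
#
# Applying them *before* the result is stored is what lets
# result.children be populated by the time the parent result is fetched.
# --------------------------------------------------------------------------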
- [subtask(callback).apply_async((retval, )) - for callback in task_request.callbacks or []] - if publish_result: - store_result(uuid, retval, SUCCESS) - if task_on_success: - task_on_success(retval, uuid, args, kwargs) - if success_receivers: - send_success(sender=task, result=retval) - - # -* POST *- - if state not in IGNORE_STATES: - if task_request.chord: - on_chord_part_return(task) - if task_after_return: - task_after_return( - state, retval, uuid, args, kwargs, None, - ) - if postrun_receivers: - send_postrun(sender=task, task_id=uuid, task=task, - args=args, kwargs=kwargs, - retval=retval, state=state) - finally: - pop_task() - pop_request() - if not eager: - try: - backend_cleanup() - loader_cleanup() - except (KeyboardInterrupt, SystemExit, MemoryError): - raise - except Exception, exc: - _logger.error('Process cleanup failed: %r', exc, - exc_info=True) - except Exception, exc: - if eager: - raise - R = report_internal_error(task, exc) - return R, I - - return trace_task - - -def trace_task(task, uuid, args, kwargs, request={}, **opts): - try: - if task.__trace__ is None: - task.__trace__ = build_tracer(task.name, task, **opts) - return task.__trace__(uuid, args, kwargs, request)[0] - except Exception, exc: - return report_internal_error(task, exc) - - -def _trace_task_ret(name, uuid, args, kwargs, request={}, **opts): - return trace_task(current_app.tasks[name], - uuid, args, kwargs, request, **opts) -trace_task_ret = _trace_task_ret - - -def _fast_trace_task(task, uuid, args, kwargs, request={}): - # setup_worker_optimizations will point trace_task_ret to here, - # so this is the function used in the worker. - return _tasks[task].__trace__(uuid, args, kwargs, request)[0] - - -def eager_trace_task(task, uuid, args, kwargs, request=None, **opts): - opts.setdefault('eager', True) - return build_tracer(task.name, task, **opts)( - uuid, args, kwargs, request) - - -def report_internal_error(task, exc): - _type, _value, _tb = sys.exc_info() - try: - _value = task.backend.prepare_exception(exc) - exc_info = ExceptionInfo((_type, _value, _tb), internal=True) - warn(RuntimeWarning( - 'Exception raised outside body: %r:\n%s' % ( - exc, exc_info.traceback))) - return exc_info - finally: - del(_tb) - - -def setup_worker_optimizations(app): - global _tasks - global trace_task_ret - - # make sure custom Task.__call__ methods that calls super - # will not mess up the request/task stack. - _install_stack_protection() - - # all new threads start without a current app, so if an app is not - # passed on to the thread it will fall back to the "default app", - # which then could be the wrong app. So for the worker - # we set this to always return our app. This is a hack, - # and means that only a single app can be used for workers - # running in the same process. - app.set_current() - set_default_app(app) - - # evaluate all task classes by finalizing the app. 
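# --- illustrative sketch (not from the patched sources) ------------------
# Finalizing forces pending @app.task decorators to be evaluated, so the
# registry shortcut taken below sees every task. Sketch (the task name
# shown assumes this runs as __main__):
#
#     app = Celery('proj')
#
#     @app.task
#     def add(x, y):
#         return x + y
#
#     app.finalize()
#     assert 'proj.add' in app.tasks
# --------------------------------------------------------------------------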
- app.finalize() - - # set fast shortcut to task registry - _tasks = app._tasks - - trace_task_ret = _fast_trace_task - try: - job = sys.modules['celery.worker.job'] - except KeyError: - pass - else: - job.trace_task_ret = _fast_trace_task - job.__optimize__() - - -def reset_worker_optimizations(): - global trace_task_ret - trace_task_ret = _trace_task_ret - try: - delattr(BaseTask, '_stackprotected') - except AttributeError: - pass - try: - BaseTask.__call__ = _patched.pop('BaseTask.__call__') - except KeyError: - pass - try: - sys.modules['celery.worker.job'].trace_task_ret = _trace_task_ret - except KeyError: - pass - - -def _install_stack_protection(): - # Patches BaseTask.__call__ in the worker to handle the edge case - # where people override it and also call super. - # - # - The worker optimizes away BaseTask.__call__ and instead - # calls task.run directly. - # - so with the addition of current_task and the request stack - # BaseTask.__call__ now pushes to those stacks so that - # they work when tasks are called directly. - # - # The worker only optimizes away __call__ in the case - # where it has not been overridden, so the request/task stack - # will blow if a custom task class defines __call__ and also - # calls super(). - if not getattr(BaseTask, '_stackprotected', False): - _patched['BaseTask.__call__'] = orig = BaseTask.__call__ - - def __protected_call__(self, *args, **kwargs): - stack = self.request_stack - req = stack.top - if req and not req._protected and \ - len(stack) == 1 and not req.called_directly: - req._protected = 1 - return self.run(*args, **kwargs) - return orig(self, *args, **kwargs) - BaseTask.__call__ = __protected_call__ - BaseTask._stackprotected = True +from celery.app import trace +sys.modules[__name__] = trace diff --git a/awx/lib/site-packages/celery/tests/__init__.py b/awx/lib/site-packages/celery/tests/__init__.py index 0284069f0e..4394ba977e 100644 --- a/awx/lib/site-packages/celery/tests/__init__.py +++ b/awx/lib/site-packages/celery/tests/__init__.py @@ -1,5 +1,4 @@ from __future__ import absolute_import -from __future__ import with_statement import logging import os @@ -15,24 +14,22 @@ except NameError: class WindowsError(Exception): pass -config_module = os.environ.setdefault( - 'CELERY_TEST_CONFIG_MODULE', 'celery.tests.config', -) - -os.environ.setdefault('CELERY_CONFIG_MODULE', config_module) -os.environ['CELERY_LOADER'] = 'default' -os.environ['EVENTLET_NOPATCH'] = 'yes' -os.environ['GEVENT_NOPATCH'] = 'yes' -os.environ['KOMBU_DISABLE_LIMIT_PROTECTION'] = 'yes' -os.environ['CELERY_BROKER_URL'] = 'memory://' - def setup(): + os.environ.update( + # warn if config module not found + C_WNOCONF='yes', + KOMBU_DISABLE_LIMIT_PROTECTION='yes', + ) + if os.environ.get('COVER_ALL_MODULES') or '--with-coverage3' in sys.argv: - from celery.tests.utils import catch_warnings + from warnings import catch_warnings with catch_warnings(record=True): import_all_modules() warnings.resetwarnings() + from celery.tests.case import Trap + from celery._state import set_default_app + set_default_app(Trap()) def teardown(): @@ -79,9 +76,11 @@ def find_distribution_modules(name=__name__, file=__file__): def import_all_modules(name=__name__, file=__file__, - skip=['celery.decorators', 'celery.contrib.batches']): + skip=('celery.decorators', + 'celery.contrib.batches', + 'celery.task')): for module in find_distribution_modules(name, file): - if module not in skip: + if not module.startswith(skip): try: import_module(module) except ImportError: diff --git 
a/awx/lib/site-packages/celery/tests/app/test_amqp.py b/awx/lib/site-packages/celery/tests/app/test_amqp.py index 06aec7892c..2bbd376301 100644 --- a/awx/lib/site-packages/celery/tests/app/test_amqp.py +++ b/awx/lib/site-packages/celery/tests/app/test_amqp.py @@ -1,11 +1,10 @@ from __future__ import absolute_import -from __future__ import with_statement from kombu import Exchange, Queue -from mock import Mock from celery.app.amqp import Queues, TaskPublisher -from celery.tests.utils import AppCase +from celery.five import keys +from celery.tests.case import AppCase, Mock class test_TaskProducer(AppCase): @@ -36,6 +35,38 @@ class test_TaskProducer(AppCase): prod.publish_task('tasks.add', (2, 2), {}, retry=False, chord=123) self.assertFalse(prod.connection.ensure.call_count) + def test_publish_custom_queue(self): + prod = self.app.amqp.TaskProducer(Mock()) + self.app.amqp.queues['some_queue'] = Queue( + 'xxx', Exchange('yyy'), 'zzz', + ) + prod.channel.connection.client.declared_entities = set() + prod.publish = Mock() + prod.publish_task('tasks.add', (8, 8), {}, retry=False, + queue='some_queue') + self.assertEqual(prod.publish.call_args[1]['exchange'], 'yyy') + self.assertEqual(prod.publish.call_args[1]['routing_key'], 'zzz') + + def test_event_dispatcher(self): + prod = self.app.amqp.TaskProducer(Mock()) + self.assertTrue(prod.event_dispatcher) + self.assertFalse(prod.event_dispatcher.enabled) + + +class test_TaskConsumer(AppCase): + + def test_accept_content(self): + with self.app.pool.acquire(block=True) as conn: + self.app.conf.CELERY_ACCEPT_CONTENT = ['application/json'] + self.assertEqual( + self.app.amqp.TaskConsumer(conn).accept, + set(['application/json']) + ) + self.assertEqual( + self.app.amqp.TaskConsumer(conn, accept=['json']).accept, + set(['application/json']), + ) + class test_compat_TaskPublisher(AppCase): @@ -49,70 +80,58 @@ class test_compat_TaskPublisher(AppCase): self.assertEqual(producer.exchange.type, 'topic') def test_compat_exchange_is_Exchange(self): - producer = TaskPublisher(exchange=Exchange('foo')) + producer = TaskPublisher(exchange=Exchange('foo'), app=self.app) self.assertEqual(producer.exchange.name, 'foo') class test_PublisherPool(AppCase): def test_setup_nolimit(self): - L = self.app.conf.BROKER_POOL_LIMIT self.app.conf.BROKER_POOL_LIMIT = None try: delattr(self.app, '_pool') except AttributeError: pass self.app.amqp._producer_pool = None - try: - pool = self.app.amqp.producer_pool - self.assertEqual(pool.limit, self.app.pool.limit) - self.assertFalse(pool._resource.queue) + pool = self.app.amqp.producer_pool + self.assertEqual(pool.limit, self.app.pool.limit) + self.assertFalse(pool._resource.queue) - r1 = pool.acquire() - r2 = pool.acquire() - r1.release() - r2.release() - r1 = pool.acquire() - r2 = pool.acquire() - finally: - self.app.conf.BROKER_POOL_LIMIT = L + r1 = pool.acquire() + r2 = pool.acquire() + r1.release() + r2.release() + r1 = pool.acquire() + r2 = pool.acquire() def test_setup(self): - L = self.app.conf.BROKER_POOL_LIMIT self.app.conf.BROKER_POOL_LIMIT = 2 try: delattr(self.app, '_pool') except AttributeError: pass self.app.amqp._producer_pool = None - try: - pool = self.app.amqp.producer_pool - self.assertEqual(pool.limit, self.app.pool.limit) - self.assertTrue(pool._resource.queue) + pool = self.app.amqp.producer_pool + self.assertEqual(pool.limit, self.app.pool.limit) + self.assertTrue(pool._resource.queue) - p1 = r1 = pool.acquire() - p2 = r2 = pool.acquire() - r1.release() - r2.release() - r1 = pool.acquire() - r2 = pool.acquire() 
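# --- illustrative sketch (not from the patched sources) ------------------
# With a finite BROKER_POOL_LIMIT the pool hands the same producer
# objects back in LIFO order, which is what the assertions below verify.
# Typical pooled publishing releases via context manager:
#
#     with app.amqp.producer_pool.acquire(block=True) as producer:
#         producer.publish({'hello': 'world'},
#                          exchange='ex', routing_key='rk',
#                          serializer='json')
# --------------------------------------------------------------------------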
- self.assertIs(p2, r1) - self.assertIs(p1, r2) - r1.release() - r2.release() - finally: - self.app.conf.BROKER_POOL_LIMIT = L + p1 = r1 = pool.acquire() + p2 = r2 = pool.acquire() + r1.release() + r2.release() + r1 = pool.acquire() + r2 = pool.acquire() + self.assertIs(p2, r1) + self.assertIs(p1, r2) + r1.release() + r2.release() class test_Queues(AppCase): def test_queues_format(self): - prev, self.app.amqp.queues._consume_from = ( - self.app.amqp.queues._consume_from, {}) - try: - self.assertEqual(self.app.amqp.queues.format(), '') - finally: - self.app.amqp.queues._consume_from = prev + self.app.amqp.queues._consume_from = {} + self.assertEqual(self.app.amqp.queues.format(), '') def test_with_defaults(self): self.assertEqual(Queues(None), {}) @@ -124,6 +143,49 @@ class test_Queues(AppCase): self.assertIsInstance(q['foo'], Queue) self.assertEqual(q['foo'].routing_key, 'rk') + def test_with_ha_policy(self): + qn = Queues(ha_policy=None, create_missing=False) + qn.add('xyz') + self.assertIsNone(qn['xyz'].queue_arguments) + + qn.add('xyx', queue_arguments={'x-foo': 'bar'}) + self.assertEqual(qn['xyx'].queue_arguments, {'x-foo': 'bar'}) + + q = Queues(ha_policy='all', create_missing=False) + q.add(Queue('foo')) + self.assertEqual(q['foo'].queue_arguments, {'x-ha-policy': 'all'}) + + qq = Queue('xyx2', queue_arguments={'x-foo': 'bari'}) + q.add(qq) + self.assertEqual(q['xyx2'].queue_arguments, { + 'x-ha-policy': 'all', + 'x-foo': 'bari', + }) + + q2 = Queues(ha_policy=['A', 'B', 'C'], create_missing=False) + q2.add(Queue('foo')) + self.assertEqual(q2['foo'].queue_arguments, { + 'x-ha-policy': 'nodes', + 'x-ha-policy-params': ['A', 'B', 'C'], + }) + + def test_select_add(self): + q = Queues() + q.select(['foo', 'bar']) + q.select_add('baz') + self.assertItemsEqual(keys(q._consume_from), ['foo', 'bar', 'baz']) + + def test_deselect(self): + q = Queues() + q.select(['foo', 'bar']) + q.deselect('bar') + self.assertItemsEqual(keys(q._consume_from), ['foo']) + + def test_with_ha_policy_compat(self): + q = Queues(ha_policy='all') + q.add('bar') + self.assertEqual(q['bar'].queue_arguments, {'x-ha-policy': 'all'}) + def test_add_default_exchange(self): ex = Exchange('fff', 'fanout') q = Queues(default_exchange=ex) diff --git a/awx/lib/site-packages/celery/tests/app/test_annotations.py b/awx/lib/site-packages/celery/tests/app/test_annotations.py index 10e49bef94..559f5cb010 100644 --- a/awx/lib/site-packages/celery/tests/app/test_annotations.py +++ b/awx/lib/site-packages/celery/tests/app/test_annotations.py @@ -1,32 +1,35 @@ from __future__ import absolute_import from celery.app.annotations import MapAnnotation, prepare -from celery.task import task from celery.utils.imports import qualname -from celery.tests.utils import Case - - -@task() -def add(x, y): - return x + y - - -@task() -def mul(x, y): - return x * y +from celery.tests.case import AppCase class MyAnnotation(object): foo = 65 -class test_MapAnnotation(Case): +class AnnotationCase(AppCase): + + def setup(self): + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + + @self.app.task(shared=False) + def mul(x, y): + return x * y + self.mul = mul + + +class test_MapAnnotation(AnnotationCase): def test_annotate(self): - x = MapAnnotation({add.name: {'foo': 1}}) - self.assertDictEqual(x.annotate(add), {'foo': 1}) - self.assertIsNone(x.annotate(mul)) + x = MapAnnotation({self.add.name: {'foo': 1}}) + self.assertDictEqual(x.annotate(self.add), {'foo': 1}) + self.assertIsNone(x.annotate(self.mul)) def 
test_annotate_any(self): x = MapAnnotation({'*': {'foo': 2}}) @@ -36,10 +39,10 @@ class test_MapAnnotation(Case): self.assertIsNone(x.annotate_any()) -class test_prepare(Case): +class test_prepare(AnnotationCase): def test_dict_to_MapAnnotation(self): - x = prepare({add.name: {'foo': 3}}) + x = prepare({self.add.name: {'foo': 3}}) self.assertIsInstance(x[0], MapAnnotation) def test_returns_list(self): diff --git a/awx/lib/site-packages/celery/tests/app/test_app.py b/awx/lib/site-packages/celery/tests/app/test_app.py index 5e1a15586d..93857c7974 100644 --- a/awx/lib/site-packages/celery/tests/app/test_app.py +++ b/awx/lib/site-packages/celery/tests/app/test_app.py @@ -1,180 +1,258 @@ from __future__ import absolute_import -from __future__ import with_statement import os +import itertools -from mock import Mock, patch +from copy import deepcopy from pickle import loads, dumps from kombu import Exchange -from celery import Celery +from celery import shared_task, current_app from celery import app as _app from celery import _state +from celery.app import base as _appbase from celery.app import defaults +from celery.exceptions import ImproperlyConfigured +from celery.five import items from celery.loaders.base import BaseLoader from celery.platforms import pyimplementation from celery.utils.serialization import pickle -from celery.tests import config -from celery.tests.utils import (Case, mask_modules, platform_pyimp, - sys_platform, pypy_version) +from celery.tests.case import ( + CELERY_TEST_CONFIG, + AppCase, + Mock, + depends_on_current_app, + mask_modules, + patch, + platform_pyimp, + sys_platform, + pypy_version, + with_environ, +) from celery.utils import uuid from celery.utils.mail import ErrorMail THIS_IS_A_KEY = 'this is a value' +class ObjectConfig(object): + FOO = 1 + BAR = 2 + +object_config = ObjectConfig() +dict_config = dict(FOO=10, BAR=20) + + class Object(object): def __init__(self, **kwargs): - for key, value in kwargs.items(): + for key, value in items(kwargs): setattr(self, key, value) def _get_test_config(): - return dict((key, getattr(config, key)) - for key in dir(config) - if key.isupper() and not key.startswith('_')) - + return deepcopy(CELERY_TEST_CONFIG) test_config = _get_test_config() -class test_module(Case): +class test_module(AppCase): def test_default_app(self): self.assertEqual(_app.default_app, _state.default_app) def test_bugreport(self): - self.assertTrue(_app.bugreport()) + self.assertTrue(_app.bugreport(app=self.app)) -class test_App(Case): +class test_App(AppCase): - def setUp(self): - self.app = Celery(set_as_current=False) - self.app.conf.update(test_config) + def setup(self): + self.app.add_defaults(test_config) def test_task(self): - app = Celery('foozibari', set_as_current=False) + with self.Celery('foozibari') as app: - def fun(): - pass + def fun(): + pass - fun.__module__ = '__main__' - task = app.task(fun) - self.assertEqual(task.name, app.main + '.fun') + fun.__module__ = '__main__' + task = app.task(fun) + self.assertEqual(task.name, app.main + '.fun') - def test_with_broker(self): - prev = os.environ.get('CELERY_BROKER_URL') - os.environ.pop('CELERY_BROKER_URL', None) + def test_with_config_source(self): + with self.Celery(config_source=ObjectConfig) as app: + self.assertEqual(app.conf.FOO, 1) + self.assertEqual(app.conf.BAR, 2) + + @depends_on_current_app + def test_task_windows_execv(self): + prev, _appbase._EXECV = _appbase._EXECV, True try: - app = Celery(set_as_current=False, broker='foo://baribaz') - 
self.assertEqual(app.conf.BROKER_HOST, 'foo://baribaz') + + @self.app.task(shared=False) + def foo(): + pass + + self.assertTrue(foo._get_current_object()) # is proxy + finally: - os.environ['CELERY_BROKER_URL'] = prev + _appbase._EXECV = prev + assert not _appbase._EXECV + + def test_task_takes_no_args(self): + with self.assertRaises(TypeError): + @self.app.task(1) + def foo(): + pass + + def test_add_defaults(self): + self.assertFalse(self.app.configured) + _conf = {'FOO': 300} + conf = lambda: _conf + self.app.add_defaults(conf) + self.assertIn(conf, self.app._pending_defaults) + self.assertFalse(self.app.configured) + self.assertEqual(self.app.conf.FOO, 300) + self.assertTrue(self.app.configured) + self.assertFalse(self.app._pending_defaults) + + # defaults not pickled + appr = loads(dumps(self.app)) + with self.assertRaises(AttributeError): + appr.conf.FOO + + # add more defaults after configured + conf2 = {'FOO': 'BAR'} + self.app.add_defaults(conf2) + self.assertEqual(self.app.conf.FOO, 'BAR') + + self.assertIn(_conf, self.app.conf.defaults) + self.assertIn(conf2, self.app.conf.defaults) + + def test_connection_or_acquire(self): + with self.app.connection_or_acquire(block=True): + self.assertTrue(self.app.pool._dirty) + + with self.app.connection_or_acquire(pool=False): + self.assertFalse(self.app.pool._dirty) + + def test_maybe_close_pool(self): + cpool = self.app._pool = Mock() + ppool = self.app.amqp._producer_pool = Mock() + self.app._maybe_close_pool() + cpool.force_close_all.assert_called_with() + ppool.force_close_all.assert_called_with() + self.assertIsNone(self.app._pool) + self.assertIsNone(self.app.amqp._producer_pool) + + self.app._pool = Mock() + self.app._maybe_close_pool() + self.app._maybe_close_pool() + + def test_using_v1_reduce(self): + self.app._using_v1_reduce = True + self.assertTrue(loads(dumps(self.app))) + + def test_autodiscover_tasks(self): + self.app.conf.CELERY_FORCE_BILLIARD_LOGGING = True + with patch('celery.app.base.ensure_process_aware_logger') as ep: + self.app.loader.autodiscover_tasks = Mock() + self.app.autodiscover_tasks(['proj.A', 'proj.B']) + ep.assert_called_with() + self.app.loader.autodiscover_tasks.assert_called_with( + ['proj.A', 'proj.B'], 'tasks', + ) + with patch('celery.app.base.ensure_process_aware_logger') as ep: + self.app.conf.CELERY_FORCE_BILLIARD_LOGGING = False + self.app.autodiscover_tasks(['proj.A', 'proj.B']) + self.assertFalse(ep.called) + + @with_environ('CELERY_BROKER_URL', '') + def test_with_broker(self): + with self.Celery(broker='foo://baribaz') as app: + self.assertEqual(app.conf.BROKER_URL, 'foo://baribaz') def test_repr(self): self.assertTrue(repr(self.app)) def test_custom_task_registry(self): - app1 = Celery(set_as_current=False) - app2 = Celery(set_as_current=False, tasks=app1.tasks) - self.assertIs(app2.tasks, app1.tasks) + with self.Celery(tasks=self.app.tasks) as app2: + self.assertIs(app2.tasks, self.app.tasks) def test_include_argument(self): - app = Celery(set_as_current=False, include=('foo', 'bar.foo')) - self.assertEqual(app.conf.CELERY_IMPORTS, ('foo', 'bar.foo')) + with self.Celery(include=('foo', 'bar.foo')) as app: + self.assertEqual(app.conf.CELERY_IMPORTS, ('foo', 'bar.foo')) def test_set_as_current(self): current = _state._tls.current_app try: - app = Celery(set_as_current=True) + app = self.Celery(set_as_current=True) self.assertIs(_state._tls.current_app, app) finally: _state._tls.current_app = current def test_current_task(self): - app = Celery(set_as_current=False) - - @app.task - def foo(): 
+ @self.app.task + def foo(shared=False): pass _state._task_stack.push(foo) try: - self.assertEqual(app.current_task.name, foo.name) + self.assertEqual(self.app.current_task.name, foo.name) finally: _state._task_stack.pop() def test_task_not_shared(self): - with patch('celery.app.base.shared_task') as shared_task: - app = Celery(set_as_current=False) - - @app.task(shared=False) + with patch('celery.app.base.shared_task') as sh: + @self.app.task(shared=False) def foo(): pass - self.assertFalse(shared_task.called) + self.assertFalse(sh.called) def test_task_compat_with_filter(self): - app = Celery(set_as_current=False, accept_magic_kwargs=True) - check = Mock() + with self.Celery(accept_magic_kwargs=True) as app: + check = Mock() - def filter(task): - check(task) - return task + def filter(task): + check(task) + return task - @app.task(filter=filter) - def foo(): - pass - check.assert_called_with(foo) + @app.task(filter=filter, shared=False) + def foo(): + pass + check.assert_called_with(foo) def test_task_with_filter(self): - app = Celery(set_as_current=False, accept_magic_kwargs=False) - check = Mock() + with self.Celery(accept_magic_kwargs=False) as app: + check = Mock() - def filter(task): - check(task) - return task + def filter(task): + check(task) + return task - @app.task(filter=filter) - def foo(): - pass - check.assert_called_with(foo) + assert not _appbase._EXECV + + @app.task(filter=filter, shared=False) + def foo(): + pass + check.assert_called_with(foo) def test_task_sets_main_name_MP_MAIN_FILE(self): from celery import utils as _utils _utils.MP_MAIN_FILE = __file__ try: - app = Celery('xuzzy', set_as_current=False) + with self.Celery('xuzzy') as app: - @app.task - def foo(): - pass + @app.task + def foo(): + pass - self.assertEqual(foo.name, 'xuzzy.foo') + self.assertEqual(foo.name, 'xuzzy.foo') finally: _utils.MP_MAIN_FILE = None - def test_base_task_inherits_magic_kwargs_from_app(self): - from celery.task import Task as OldTask - - class timkX(OldTask): - abstract = True - - app = Celery(set_as_current=False, accept_magic_kwargs=True) - timkX.bind(app) - # see #918 - self.assertFalse(timkX.accept_magic_kwargs) - - from celery import Task as NewTask - - class timkY(NewTask): - abstract = True - - timkY.bind(app) - self.assertFalse(timkY.accept_magic_kwargs) - def test_annotate_decorator(self): from celery.app.task import Task @@ -193,12 +271,11 @@ class test_App(Case): return fun(*args, **kwargs) return _inner - app = Celery(set_as_current=False) - app.conf.CELERY_ANNOTATIONS = { + self.app.conf.CELERY_ANNOTATIONS = { adX.name: {'@__call__': deco} } - adX.bind(app) - self.assertIs(adX.app, app) + adX.bind(self.app) + self.assertIs(adX.app, self.app) i = adX() i(2, 4, x=3) @@ -208,9 +285,7 @@ class test_App(Case): i.annotate() def test_apply_async_has__self__(self): - app = Celery(set_as_current=False) - - @app.task(__self__='hello') + @self.app.task(__self__='hello', shared=False) def aawsX(): pass @@ -219,26 +294,14 @@ class test_App(Case): args = dt.call_args[0][1] self.assertEqual(args, ('hello', 4, 5)) - def test_apply_async__connection_arg(self): - app = Celery(set_as_current=False) - - @app.task() - def aacaX(): - pass - - connection = app.connection('asd://') - with self.assertRaises(KeyError): - aacaX.apply_async(connection=connection) - def test_apply_async_adds_children(self): from celery._state import _task_stack - app = Celery(set_as_current=False) - @app.task() + @self.app.task(shared=False) def a3cX1(self): pass - @app.task() + @self.app.task(shared=False) def 
a3cX2(self): pass @@ -253,11 +316,6 @@ class test_App(Case): finally: _task_stack.pop() - def test_TaskSet(self): - ts = self.app.TaskSet() - self.assertListEqual(ts.tasks, []) - self.assertIs(ts.app, self.app) - def test_pickle_app(self): changes = dict(THE_FOO_BAR='bars', THE_MII_MAR='jars') @@ -268,19 +326,19 @@ class test_App(Case): self.assertDictContainsSubset(changes, restored.conf) def test_worker_main(self): - from celery.bin import celeryd + from celery.bin import worker as worker_bin - class WorkerCommand(celeryd.WorkerCommand): + class worker(worker_bin.worker): def execute_from_commandline(self, argv): return argv - prev, celeryd.WorkerCommand = celeryd.WorkerCommand, WorkerCommand + prev, worker_bin.worker = worker_bin.worker, worker try: ret = self.app.worker_main(argv=['--version']) self.assertListEqual(ret, ['--version']) finally: - celeryd.WorkerCommand = prev + worker_bin.worker = prev def test_config_from_envvar(self): os.environ['CELERYTEST_CONFIG_OBJECT'] = 'celery.tests.app.test_app' @@ -307,7 +365,6 @@ class test_App(Case): def test_config_from_cmdline(self): cmdline = ['.always_eager=no', '.result_backend=/dev/null', - '.task_error_whitelist=(list)["a", "b", "c"]', 'celeryd.prefetch_multiplier=368', '.foobarstring=(string)300', '.foobarint=(int)300', @@ -316,8 +373,6 @@ class test_App(Case): self.assertFalse(self.app.conf.CELERY_ALWAYS_EAGER) self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, '/dev/null') self.assertEqual(self.app.conf.CELERYD_PREFETCH_MULTIPLIER, 368) - self.assertListEqual(self.app.conf.CELERY_TASK_ERROR_WHITELIST, - ['a', 'b', 'c']) self.assertEqual(self.app.conf.CELERY_FOOBARSTRING, '300') self.assertEqual(self.app.conf.CELERY_FOOBARINT, 300) self.assertDictEqual(self.app.conf.CELERY_RESULT_ENGINE_OPTIONS, @@ -354,12 +409,46 @@ class test_App(Case): x = self.app.Worker self.assertIs(x.app, self.app) + @depends_on_current_app def test_AsyncResult(self): x = self.app.AsyncResult('1') self.assertIs(x.app, self.app) r = loads(dumps(x)) # not set as current, so ends up as default app after reduce - self.assertIs(r.app, _state.default_app) + self.assertIs(r.app, current_app._get_current_object()) + + def test_get_active_apps(self): + self.assertTrue(list(_state._get_active_apps())) + + app1 = self.Celery() + appid = id(app1) + self.assertIn(app1, _state._get_active_apps()) + app1.close() + del(app1) + + # weakref removed from list when app goes out of scope. 
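# --- illustrative sketch (not from the patched sources) ------------------
# The registry behaviour asserted below is plain weakref semantics; a
# self-contained version of the same idea:
#
#     import gc
#     import weakref
#
#     class App(object):
#         pass
#
#     registry = weakref.WeakSet()
#     a = App()
#     registry.add(a)
#     del a
#     gc.collect()                      # immediate on CPython refcounting
#     assert len(list(registry)) == 0   # gone once no strong refs remain
# --------------------------------------------------------------------------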
+ with self.assertRaises(StopIteration): + next(app for app in _state._get_active_apps() if id(app) == appid) + + def test_config_from_envvar_more(self, key='CELERY_HARNESS_CFG1'): + self.assertFalse(self.app.config_from_envvar('HDSAJIHWIQHEWQU', + silent=True)) + with self.assertRaises(ImproperlyConfigured): + self.app.config_from_envvar('HDSAJIHWIQHEWQU', silent=False) + os.environ[key] = __name__ + '.object_config' + self.assertTrue(self.app.config_from_envvar(key)) + self.assertEqual(self.app.conf['FOO'], 1) + self.assertEqual(self.app.conf['BAR'], 2) + + os.environ[key] = 'unknown_asdwqe.asdwqewqe' + with self.assertRaises(ImportError): + self.app.config_from_envvar(key, silent=False) + self.assertFalse(self.app.config_from_envvar(key, silent=True)) + + os.environ[key] = __name__ + '.dict_config' + self.assertTrue(self.app.config_from_envvar(key)) + self.assertEqual(self.app.conf['FOO'], 10) + self.assertEqual(self.app.conf['BAR'], 20) @patch('celery.bin.celery.CeleryCommand.execute_from_commandline') def test_start(self, execute): @@ -373,7 +462,7 @@ class test_App(Case): def mail_admins(*args, **kwargs): return args, kwargs - self.app.loader = Loader() + self.app.loader = Loader(app=self.app) self.app.conf.ADMINS = None self.assertFalse(self.app.mail_admins('Subject', 'Body')) self.app.conf.ADMINS = [('George Costanza', 'george@vandelay.com')] @@ -385,32 +474,48 @@ class test_App(Case): 'userid': 'guest', 'password': 'guest', 'virtual_host': '/'}, - self.app.connection('amqp://').info(), + self.app.connection('pyamqp://').info(), ) self.app.conf.BROKER_PORT = 1978 self.app.conf.BROKER_VHOST = 'foo' self.assertDictContainsSubset( {'port': 1978, 'virtual_host': 'foo'}, - self.app.connection('amqp://:1978/foo').info(), + self.app.connection('pyamqp://:1978/foo').info(), ) - conn = self.app.connection('amqp:////value') + conn = self.app.connection('pyamqp:////value') self.assertDictContainsSubset({'virtual_host': '/value'}, conn.info()) + def test_amqp_failover_strategy_selection(self): + # Test passing in a string and make sure the string + # gets there untouched + self.app.conf.BROKER_FAILOVER_STRATEGY = 'foo-bar' + self.assertEquals( + self.app.connection('amqp:////value').failover_strategy, + 'foo-bar', + ) + + # Try passing in None + self.app.conf.BROKER_FAILOVER_STRATEGY = None + self.assertEquals( + self.app.connection('amqp:////value').failover_strategy, + itertools.cycle, + ) + + # Test passing in a method + def my_failover_strategy(it): + yield True + + self.app.conf.BROKER_FAILOVER_STRATEGY = my_failover_strategy + self.assertEquals( + self.app.connection('amqp:////value').failover_strategy, + my_failover_strategy, + ) + def test_BROKER_BACKEND_alias(self): self.assertEqual(self.app.conf.BROKER_BACKEND, self.app.conf.BROKER_TRANSPORT) - def test_with_default_connection(self): - - @self.app.with_default_connection - def handler(connection=None, foo=None): - return connection, foo - - connection, foo = handler(foo=42) - self.assertEqual(foo, 42) - self.assertTrue(connection) - def test_after_fork(self): p = self.app._pool = Mock() self.app._after_fork(self.app) @@ -469,8 +574,16 @@ class test_App(Case): 'hostname': 'lana'} self.assertTrue(x) + def test_error_mail_disabled(self): + task = Mock() + x = ErrorMail(task) + x.should_send = Mock() + x.should_send.return_value = False + x.send(Mock(), Mock()) + self.assertFalse(task.app.mail_admins.called) -class test_defaults(Case): + +class test_defaults(AppCase): def test_str_to_bool(self): for s in ('false', 'no', '0'): @@ -481,7 
+594,7 @@ class test_defaults(Case): defaults.strtobool('unsure') -class test_debugging_utils(Case): +class test_debugging_utils(AppCase): def test_enable_disable_trace(self): try: @@ -493,7 +606,7 @@ class test_debugging_utils(Case): _app.disable_trace() -class test_pyimplementation(Case): +class test_pyimplementation(AppCase): def test_platform_python_implementation(self): with platform_pyimp(lambda: 'Xython'): @@ -517,3 +630,32 @@ class test_pyimplementation(Case): with sys_platform('darwin'): with pypy_version(): self.assertEqual('CPython', pyimplementation()) + + +class test_shared_task(AppCase): + + def test_registers_to_all_apps(self): + with self.Celery('xproj', set_as_current=True) as xproj: + xproj.finalize() + + @shared_task + def foo(): + return 42 + + @shared_task() + def bar(): + return 84 + + self.assertIs(foo.app, xproj) + self.assertIs(bar.app, xproj) + self.assertTrue(foo._get_current_object()) + + with self.Celery('yproj', set_as_current=True) as yproj: + self.assertIs(foo.app, yproj) + self.assertIs(bar.app, yproj) + + @shared_task() + def baz(): + return 168 + + self.assertIs(baz.app, yproj) diff --git a/awx/lib/site-packages/celery/tests/app/test_beat.py b/awx/lib/site-packages/celery/tests/app/test_beat.py index 10262dfb70..0a1755ef19 100644 --- a/awx/lib/site-packages/celery/tests/app/test_beat.py +++ b/awx/lib/site-packages/celery/tests/app/test_beat.py @@ -1,20 +1,15 @@ from __future__ import absolute_import -from __future__ import with_statement import errno from datetime import datetime, timedelta -from mock import Mock, call, patch -from nose import SkipTest +from pickle import dumps, loads -from celery import current_app from celery import beat -from celery import task -from celery.result import AsyncResult +from celery.five import keys, string_t from celery.schedules import schedule -from celery.task.base import Task from celery.utils import uuid -from celery.tests.utils import Case, patch_settings +from celery.tests.case import AppCase, Mock, SkipTest, call, patch class Object(object): @@ -46,14 +41,17 @@ class MockService(object): self.stopped = True -class test_ScheduleEntry(Case): +class test_ScheduleEntry(AppCase): Entry = beat.ScheduleEntry def create_entry(self, **kwargs): - entry = dict(name='celery.unittest.add', - schedule=schedule(timedelta(seconds=10)), - args=(2, 2), - options={'routing_key': 'cpu'}) + entry = dict( + name='celery.unittest.add', + schedule=timedelta(seconds=10), + args=(2, 2), + options={'routing_key': 'cpu'}, + app=self.app, + ) return self.Entry(**dict(entry, **kwargs)) def test_next(self): @@ -63,19 +61,21 @@ class test_ScheduleEntry(Case): self.assertEqual(entry.total_run_count, 0) next_run_at = entry.last_run_at + timedelta(seconds=10) - next = entry.next(next_run_at) - self.assertGreaterEqual(next.last_run_at, next_run_at) - self.assertEqual(next.total_run_count, 1) + next_entry = entry.next(next_run_at) + self.assertGreaterEqual(next_entry.last_run_at, next_run_at) + self.assertEqual(next_entry.total_run_count, 1) def test_is_due(self): entry = self.create_entry(schedule=timedelta(seconds=10)) + self.assertIs(entry.app, self.app) + self.assertIs(entry.schedule.app, self.app) due1, next_time_to_run1 = entry.is_due() self.assertFalse(due1) self.assertGreater(next_time_to_run1, 9) next_run_at = entry.last_run_at - timedelta(seconds=10) - next = entry.next(next_run_at) - due2, next_time_to_run2 = next.is_due() + next_entry = entry.next(next_run_at) + due2, next_time_to_run2 = next_entry.is_due() self.assertTrue(due2) 
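# --- illustrative sketch (not from the patched sources) ------------------
# is_due() returns a (is_due, next_time_to_run) pair; the second item is
# the number of seconds until the schedule should be checked again:
#
#     from datetime import datetime, timedelta
#     from celery.schedules import schedule
#
#     s = schedule(timedelta(seconds=10))
#     due, next_in = s.is_due(datetime.utcnow())
#     # just ran -> not due; check again in roughly 10 seconds
# --------------------------------------------------------------------------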
self.assertGreater(next_time_to_run2, 9) @@ -112,7 +112,7 @@ class mScheduler(beat.Scheduler): 'args': args, 'kwargs': kwargs, 'options': options}) - return AsyncResult(uuid()) + return self.app.AsyncResult(uuid()) class mSchedulerSchedulingError(mScheduler): @@ -143,73 +143,70 @@ always_due = mocked_schedule(True, 1) always_pending = mocked_schedule(False, 1) -class test_Scheduler(Case): +class test_Scheduler(AppCase): def test_custom_schedule_dict(self): custom = {'foo': 'bar'} - scheduler = mScheduler(schedule=custom, lazy=True) + scheduler = mScheduler(app=self.app, schedule=custom, lazy=True) self.assertIs(scheduler.data, custom) def test_apply_async_uses_registered_task_instances(self): - through_task = [False] - class MockTask(Task): + @self.app.task(shared=False) + def foo(): + pass + foo.apply_async = Mock(name='foo.apply_async') + assert foo.name in foo._get_app().tasks - @classmethod - def apply_async(cls, *args, **kwargs): - through_task[0] = True - - assert MockTask.name in MockTask._get_app().tasks - - scheduler = mScheduler() - scheduler.apply_async(scheduler.Entry(task=MockTask.name)) - self.assertTrue(through_task[0]) + scheduler = mScheduler(app=self.app) + scheduler.apply_async(scheduler.Entry(task=foo.name, app=self.app)) + self.assertTrue(foo.apply_async.called) def test_apply_async_should_not_sync(self): - @task() + @self.app.task(shared=False) def not_sync(): pass not_sync.apply_async = Mock() - s = mScheduler() + s = mScheduler(app=self.app) s._do_sync = Mock() s.should_sync = Mock() s.should_sync.return_value = True - s.apply_async(s.Entry(task=not_sync.name)) + s.apply_async(s.Entry(task=not_sync.name, app=self.app)) s._do_sync.assert_called_with() s._do_sync = Mock() s.should_sync.return_value = False - s.apply_async(s.Entry(task=not_sync.name)) + s.apply_async(s.Entry(task=not_sync.name, app=self.app)) self.assertFalse(s._do_sync.called) @patch('celery.app.base.Celery.send_task') def test_send_task(self, send_task): - b = beat.Scheduler() + b = beat.Scheduler(app=self.app) b.send_task('tasks.add', countdown=10) send_task.assert_called_with('tasks.add', countdown=10) def test_info(self): - scheduler = mScheduler() - self.assertIsInstance(scheduler.info, basestring) + scheduler = mScheduler(app=self.app) + self.assertIsInstance(scheduler.info, string_t) def test_maybe_entry(self): - s = mScheduler() - entry = s.Entry(name='add every', task='tasks.add') + s = mScheduler(app=self.app) + entry = s.Entry(name='add every', task='tasks.add', app=self.app) self.assertIs(s._maybe_entry(entry.name, entry), entry) self.assertTrue(s._maybe_entry('add every', { 'task': 'tasks.add', })) def test_set_schedule(self): - s = mScheduler() + s = mScheduler(app=self.app) s.schedule = {'foo': 'bar'} self.assertEqual(s.data, {'foo': 'bar'}) @patch('kombu.connection.Connection.ensure_connection') def test_ensure_connection_error_handler(self, ensure): - s = mScheduler() + s = mScheduler(app=self.app) self.assertTrue(s._ensure_connected()) self.assertTrue(ensure.called) callback = ensure.call_args[0][0] @@ -217,29 +214,26 @@ class test_Scheduler(Case): callback(KeyError(), 5) def test_install_default_entries(self): - with patch_settings(CELERY_TASK_RESULT_EXPIRES=None, - CELERYBEAT_SCHEDULE={}): - s = mScheduler() - s.install_default_entries({}) - self.assertNotIn('celery.backend_cleanup', s.data) - current_app.backend.supports_autoexpire = False - with patch_settings(CELERY_TASK_RESULT_EXPIRES=30, - CELERYBEAT_SCHEDULE={}): - s = mScheduler() - s.install_default_entries({}) - 
self.assertIn('celery.backend_cleanup', s.data) - current_app.backend.supports_autoexpire = True - try: - with patch_settings(CELERY_TASK_RESULT_EXPIRES=31, - CELERYBEAT_SCHEDULE={}): - s = mScheduler() - s.install_default_entries({}) - self.assertNotIn('celery.backend_cleanup', s.data) - finally: - current_app.backend.supports_autoexpire = False + self.app.conf.CELERY_TASK_RESULT_EXPIRES = None + self.app.conf.CELERYBEAT_SCHEDULE = {} + s = mScheduler(app=self.app) + s.install_default_entries({}) + self.assertNotIn('celery.backend_cleanup', s.data) + self.app.backend.supports_autoexpire = False + + self.app.conf.CELERY_TASK_RESULT_EXPIRES = 30 + s = mScheduler(app=self.app) + s.install_default_entries({}) + self.assertIn('celery.backend_cleanup', s.data) + + self.app.backend.supports_autoexpire = True + self.app.conf.CELERY_TASK_RESULT_EXPIRES = 31 + s = mScheduler(app=self.app) + s.install_default_entries({}) + self.assertNotIn('celery.backend_cleanup', s.data) def test_due_tick(self): - scheduler = mScheduler() + scheduler = mScheduler(app=self.app) scheduler.add(name='test_due_tick', schedule=always_due, args=(1, 2), @@ -248,33 +242,33 @@ class test_Scheduler(Case): @patch('celery.beat.error') def test_due_tick_SchedulingError(self, error): - scheduler = mSchedulerSchedulingError() + scheduler = mSchedulerSchedulingError(app=self.app) scheduler.add(name='test_due_tick_SchedulingError', schedule=always_due) self.assertEqual(scheduler.tick(), 1) self.assertTrue(error.called) def test_due_tick_RuntimeError(self): - scheduler = mSchedulerRuntimeError() + scheduler = mSchedulerRuntimeError(app=self.app) scheduler.add(name='test_due_tick_RuntimeError', schedule=always_due) self.assertEqual(scheduler.tick(), scheduler.max_interval) def test_pending_tick(self): - scheduler = mScheduler() + scheduler = mScheduler(app=self.app) scheduler.add(name='test_pending_tick', schedule=always_pending) self.assertEqual(scheduler.tick(), 1) def test_honors_max_interval(self): - scheduler = mScheduler() + scheduler = mScheduler(app=self.app) maxi = scheduler.max_interval scheduler.add(name='test_honors_max_interval', schedule=mocked_schedule(False, maxi * 4)) self.assertEqual(scheduler.tick(), maxi) def test_ticks(self): - scheduler = mScheduler() + scheduler = mScheduler(app=self.app) nums = [600, 300, 650, 120, 250, 36] s = dict(('test_ticks%s' % i, {'schedule': mocked_schedule(False, j)}) @@ -283,20 +277,20 @@ class test_Scheduler(Case): self.assertEqual(scheduler.tick(), min(nums)) def test_schedule_no_remain(self): - scheduler = mScheduler() + scheduler = mScheduler(app=self.app) scheduler.add(name='test_schedule_no_remain', schedule=mocked_schedule(False, None)) self.assertEqual(scheduler.tick(), scheduler.max_interval) def test_interface(self): - scheduler = mScheduler() + scheduler = mScheduler(app=self.app) scheduler.sync() scheduler.setup_schedule() scheduler.close() def test_merge_inplace(self): - a = mScheduler() - b = mScheduler() + a = mScheduler(app=self.app) + b = mScheduler(app=self.app) a.update_from_dict({'foo': {'schedule': mocked_schedule(True, 10)}, 'bar': {'schedule': mocked_schedule(True, 20)}}) b.update_from_dict({'bar': {'schedule': mocked_schedule(True, 40)}, @@ -329,11 +323,12 @@ def create_persistent_scheduler(shelv=None): return MockPersistentScheduler, shelv -class test_PersistentScheduler(Case): +class test_PersistentScheduler(AppCase): @patch('os.remove') def test_remove_db(self, remove): - s = create_persistent_scheduler()[0](schedule_filename='schedule') + s = 
create_persistent_scheduler()[0](app=self.app, + schedule_filename='schedule') s._remove_db() remove.assert_has_calls( [call('schedule' + suffix) for suffix in s.known_suffixes] @@ -347,7 +342,8 @@ class test_PersistentScheduler(Case): s._remove_db() def test_setup_schedule(self): - s = create_persistent_scheduler()[0](schedule_filename='schedule') + s = create_persistent_scheduler()[0](app=self.app, + schedule_filename='schedule') opens = s.persistence.open = Mock() s._remove_db = Mock() @@ -362,27 +358,45 @@ class test_PersistentScheduler(Case): s._store = {'__version__': 1} s.setup_schedule() + s._store.clear = Mock() + op = s.persistence.open = Mock() + op.return_value = s._store + s._store['tz'] = 'FUNKY' + s.setup_schedule() + op.assert_called_with(s.schedule_filename, writeback=True) + s._store.clear.assert_called_with() + s._store['utc_enabled'] = False + s._store.clear = Mock() + s.setup_schedule() + s._store.clear.assert_called_with() + def test_get_schedule(self): - s = create_persistent_scheduler()[0](schedule_filename='schedule') + s = create_persistent_scheduler()[0]( + schedule_filename='schedule', app=self.app, + ) s._store = {'entries': {}} s.schedule = {'foo': 'bar'} self.assertDictEqual(s.schedule, {'foo': 'bar'}) self.assertDictEqual(s._store['entries'], s.schedule) -class test_Service(Case): +class test_Service(AppCase): def get_service(self): Scheduler, mock_shelve = create_persistent_scheduler() - return beat.Service(scheduler_cls=Scheduler), mock_shelve + return beat.Service(app=self.app, scheduler_cls=Scheduler), mock_shelve + + def test_pickleable(self): + s = beat.Service(app=self.app, scheduler_cls=Mock) + self.assertTrue(loads(dumps(s))) def test_start(self): s, sh = self.get_service() schedule = s.scheduler.schedule self.assertIsInstance(schedule, dict) self.assertIsInstance(s.scheduler, beat.Scheduler) - scheduled = schedule.keys() - for task_name in sh['entries'].keys(): + scheduled = list(schedule.keys()) + for task_name in keys(sh['entries']): self.assertIn(task_name, scheduled) s.sync() @@ -425,7 +439,7 @@ class test_Service(Case): self.assertTrue(s._is_shutdown.isSet()) -class test_EmbeddedService(Case): +class test_EmbeddedService(AppCase): def test_start_stop_process(self): try: @@ -435,7 +449,7 @@ class test_EmbeddedService(Case): from billiard.process import Process - s = beat.EmbeddedService() + s = beat.EmbeddedService(app=self.app) self.assertIsInstance(s, Process) self.assertIsInstance(s.service, beat.Service) s.service = MockService() @@ -446,7 +460,8 @@ class test_EmbeddedService(Case): def terminate(self): self.terminated = True - s.run() + with patch('celery.platforms.close_open_fds'): + s.run() self.assertTrue(s.service.started) s._popen = _Popen() @@ -455,7 +470,7 @@ class test_EmbeddedService(Case): self.assertTrue(s._popen.terminated) def test_start_stop_threaded(self): - s = beat.EmbeddedService(thread=True) + s = beat.EmbeddedService(thread=True, app=self.app) from threading import Thread self.assertIsInstance(s, Thread) self.assertIsInstance(s.service, beat.Service) @@ -466,3 +481,24 @@ class test_EmbeddedService(Case): s.stop() self.assertTrue(s.service.stopped) + + +class test_schedule(AppCase): + + def test_maybe_make_aware(self): + x = schedule(10, app=self.app) + x.utc_enabled = True + d = x.maybe_make_aware(datetime.utcnow()) + self.assertTrue(d.tzinfo) + x.utc_enabled = False + d2 = x.maybe_make_aware(datetime.utcnow()) + self.assertIsNone(d2.tzinfo) + + def test_to_local(self): + x = schedule(10, app=self.app) + x.utc_enabled = 
True + d = x.to_local(datetime.utcnow()) + self.assertIsNone(d.tzinfo) + x.utc_enabled = False + d = x.to_local(datetime.utcnow()) + self.assertTrue(d.tzinfo) diff --git a/awx/lib/site-packages/celery/tests/app/test_builtins.py b/awx/lib/site-packages/celery/tests/app/test_builtins.py index dc09fab2a7..9b00c1a253 100644 --- a/awx/lib/site-packages/celery/tests/app/test_builtins.py +++ b/awx/lib/site-packages/celery/tests/app/test_builtins.py @@ -1,162 +1,217 @@ from __future__ import absolute_import -from mock import Mock, patch - -from celery import current_app as app, group, task, chord +from celery import group, chord from celery.app import builtins +from celery.canvas import Signature +from celery.five import range from celery._state import _task_stack -from celery.tests.utils import Case +from celery.tests.case import AppCase, Mock, patch -@task() -def add(x, y): - return x + y +class BuiltinsCase(AppCase): + + def setup(self): + @self.app.task(shared=False) + def xsum(x): + return sum(x) + self.xsum = xsum + + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add -@task() -def xsum(x): - return sum(x) - - -class test_backend_cleanup(Case): +class test_backend_cleanup(BuiltinsCase): def test_run(self): - prev = app.backend - app.backend.cleanup = Mock() - app.backend.cleanup.__name__ = 'cleanup' - try: - cleanup_task = builtins.add_backend_cleanup_task(app) - cleanup_task() - self.assertTrue(app.backend.cleanup.called) - finally: - app.backend = prev + self.app.backend.cleanup = Mock() + self.app.backend.cleanup.__name__ = 'cleanup' + cleanup_task = builtins.add_backend_cleanup_task(self.app) + cleanup_task() + self.assertTrue(self.app.backend.cleanup.called) -class test_map(Case): +class test_map(BuiltinsCase): def test_run(self): - @app.task() + @self.app.task(shared=False) def map_mul(x): return x[0] * x[1] - res = app.tasks['celery.map'](map_mul, [(2, 2), (4, 4), (8, 8)]) + res = self.app.tasks['celery.map']( + map_mul, [(2, 2), (4, 4), (8, 8)], + ) self.assertEqual(res, [4, 16, 64]) -class test_starmap(Case): +class test_starmap(BuiltinsCase): def test_run(self): - @app.task() + @self.app.task(shared=False) def smap_mul(x, y): return x * y - res = app.tasks['celery.starmap'](smap_mul, [(2, 2), (4, 4), (8, 8)]) + res = self.app.tasks['celery.starmap']( + smap_mul, [(2, 2), (4, 4), (8, 8)], + ) self.assertEqual(res, [4, 16, 64]) -class test_chunks(Case): +class test_chunks(BuiltinsCase): @patch('celery.canvas.chunks.apply_chunks') def test_run(self, apply_chunks): - @app.task() + @self.app.task(shared=False) def chunks_mul(l): return l - app.tasks['celery.chunks']( + self.app.tasks['celery.chunks']( chunks_mul, [(2, 2), (4, 4), (8, 8)], 1, ) self.assertTrue(apply_chunks.called) -class test_group(Case): +class test_group(BuiltinsCase): - def setUp(self): - self.prev = app.tasks.get('celery.group') - self.task = builtins.add_group_task(app)() - - def tearDown(self): - app.tasks['celery.group'] = self.prev + def setup(self): + self.task = builtins.add_group_task(self.app)() + super(test_group, self).setup() def test_apply_async_eager(self): self.task.apply = Mock() - app.conf.CELERY_ALWAYS_EAGER = True - try: - self.task.apply_async() - finally: - app.conf.CELERY_ALWAYS_EAGER = False + self.app.conf.CELERY_ALWAYS_EAGER = True + self.task.apply_async() self.assertTrue(self.task.apply.called) def test_apply(self): - x = group([add.s(4, 4), add.s(8, 8)]) + x = group([self.add.s(4, 4), self.add.s(8, 8)]) x.name = self.task.name res = x.apply() 
self.assertEqual(res.get(), [8, 16]) def test_apply_async(self): - x = group([add.s(4, 4), add.s(8, 8)]) + x = group([self.add.s(4, 4), self.add.s(8, 8)]) x.apply_async() + def test_apply_empty(self): + x = group(app=self.app) + x.apply() + res = x.apply_async() + self.assertFalse(res) + self.assertFalse(res.results) + def test_apply_async_with_parent(self): - _task_stack.push(add) + _task_stack.push(self.add) try: - add.push_request(called_directly=False) + self.add.push_request(called_directly=False) try: - assert not add.request.children - x = group([add.s(4, 4), add.s(8, 8)]) + assert not self.add.request.children + x = group([self.add.s(4, 4), self.add.s(8, 8)]) res = x() - self.assertTrue(add.request.children) - self.assertIn(res, add.request.children) - self.assertEqual(len(add.request.children), 1) + self.assertTrue(self.add.request.children) + self.assertIn(res, self.add.request.children) + self.assertEqual(len(self.add.request.children), 1) finally: - add.pop_request() + self.add.pop_request() finally: _task_stack.pop() -class test_chain(Case): +class test_chain(BuiltinsCase): - def setUp(self): - self.prev = app.tasks.get('celery.chain') - self.task = builtins.add_chain_task(app)() - - def tearDown(self): - app.tasks['celery.chain'] = self.prev + def setup(self): + BuiltinsCase.setup(self) + self.task = builtins.add_chain_task(self.app)() def test_apply_async(self): - c = add.s(2, 2) | add.s(4) | add.s(8) + c = self.add.s(2, 2) | self.add.s(4) | self.add.s(8) result = c.apply_async() self.assertTrue(result.parent) self.assertTrue(result.parent.parent) self.assertIsNone(result.parent.parent.parent) + def test_group_to_chord(self): + c = ( + group(self.add.s(i, i) for i in range(5)) | + self.add.s(10) | + self.add.s(20) | + self.add.s(30) + ) + tasks, _ = c.type.prepare_steps((), c.tasks) + self.assertIsInstance(tasks[0], chord) + self.assertTrue(tasks[0].body.options['link']) + self.assertTrue(tasks[0].body.options['link'][0].options['link']) -class test_chord(Case): + c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10)) + tasks2, _ = c2.type.prepare_steps((), c2.tasks) + self.assertIsInstance(tasks2[1], group) - def setUp(self): - self.prev = app.tasks.get('celery.chord') - self.task = builtins.add_chord_task(app)() + def test_apply_options(self): - def tearDown(self): - app.tasks['celery.chord'] = self.prev + class static(Signature): + + def clone(self, *args, **kwargs): + return self + + def s(*args, **kwargs): + return static(self.add, args, kwargs, type=self.add) + + c = s(2, 2) | s(4, 4) | s(8, 8) + r1 = c.apply_async(task_id='some_id') + self.assertEqual(r1.id, 'some_id') + + c.apply_async(group_id='some_group_id') + self.assertEqual(c.tasks[-1].options['group_id'], 'some_group_id') + + c.apply_async(chord='some_chord_id') + self.assertEqual(c.tasks[-1].options['chord'], 'some_chord_id') + + c.apply_async(link=[s(32)]) + self.assertListEqual(c.tasks[-1].options['link'], [s(32)]) + + c.apply_async(link_error=[s('error')]) + for task in c.tasks: + self.assertListEqual(task.options['link_error'], [s('error')]) + + +class test_chord(BuiltinsCase): + + def setup(self): + self.task = builtins.add_chord_task(self.app)() + super(test_chord, self).setup() def test_apply_async(self): - x = chord([add.s(i, i) for i in xrange(10)], body=xsum.s()) + x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s()) r = x.apply_async() self.assertTrue(r) self.assertTrue(r.parent) def test_run_header_not_group(self): - self.task([add.s(i, i) for i in xrange(10)], 
xsum.s()) + self.task([self.add.s(i, i) for i in range(10)], self.xsum.s()) + + def test_forward_options(self): + body = self.xsum.s() + x = chord([self.add.s(i, i) for i in range(10)], body=body) + x._type = Mock() + x._type.app.conf.CELERY_ALWAYS_EAGER = False + x.apply_async(group_id='some_group_id') + self.assertTrue(x._type.called) + resbody = x._type.call_args[0][1] + self.assertEqual(resbody.options['group_id'], 'some_group_id') + x2 = chord([self.add.s(i, i) for i in range(10)], body=body) + x2._type = Mock() + x2._type.app.conf.CELERY_ALWAYS_EAGER = False + x2.apply_async(chord='some_chord_id') + self.assertTrue(x2._type.called) + resbody = x2._type.call_args[0][1] + self.assertEqual(resbody.options['chord'], 'some_chord_id') def test_apply_eager(self): - app.conf.CELERY_ALWAYS_EAGER = True - try: - x = chord([add.s(i, i) for i in xrange(10)], body=xsum.s()) - r = x.apply_async() - self.assertEqual(r.get(), 90) - - finally: - app.conf.CELERY_ALWAYS_EAGER = False + self.app.conf.CELERY_ALWAYS_EAGER = True + x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s()) + r = x.apply_async() + self.assertEqual(r.get(), 90) diff --git a/awx/lib/site-packages/celery/tests/app/test_celery.py b/awx/lib/site-packages/celery/tests/app/test_celery.py index b28c5f0575..5088d353f0 100644 --- a/awx/lib/site-packages/celery/tests/app/test_celery.py +++ b/awx/lib/site-packages/celery/tests/app/test_celery.py @@ -1,10 +1,10 @@ from __future__ import absolute_import -from celery.tests.utils import Case +from celery.tests.case import AppCase import celery -class test_celery_package(Case): +class test_celery_package(AppCase): def test_version(self): self.assertTrue(celery.VERSION) diff --git a/awx/lib/site-packages/celery/tests/app/test_control.py b/awx/lib/site-packages/celery/tests/app/test_control.py index 2b94ba09cd..ad4bc823a6 100644 --- a/awx/lib/site-packages/celery/tests/app/test_control.py +++ b/awx/lib/site-packages/celery/tests/app/test_control.py @@ -1,20 +1,14 @@ from __future__ import absolute_import -from __future__ import with_statement + +import warnings from functools import wraps from kombu.pidbox import Mailbox -from celery.app import app_or_default from celery.app import control -from celery.task import task from celery.utils import uuid -from celery.tests.utils import Case - - -@task() -def mytask(): - pass +from celery.tests.case import AppCase class MockMailbox(Mailbox): @@ -46,17 +40,31 @@ def with_mock_broadcast(fun): return _resets -class test_inspect(Case): +class test_flatten_reply(AppCase): - def setUp(self): - app = self.app = app_or_default() - self.c = Control(app=app) - self.prev, app.control = app.control, self.c + def test_flatten_reply(self): + reply = [ + {'foo@example.com': {'hello': 10}}, + {'foo@example.com': {'hello': 20}}, + {'bar@example.com': {'hello': 30}} + ] + with warnings.catch_warnings(record=True) as w: + nodes = control.flatten_reply(reply) + self.assertIn( + 'multiple replies', + str(w[-1].message), + ) + self.assertIn('foo@example.com', nodes) + self.assertIn('bar@example.com', nodes) + + +class test_inspect(AppCase): + + def setup(self): + self.c = Control(app=self.app) + self.prev, self.app.control = self.app.control, self.c self.i = self.c.inspect() - def tearDown(self): - self.app.control = self.prev - def test_prepare_reply(self): self.assertDictEqual(self.i._prepare([{'w1': {'ok': 1}}, {'w2': {'ok': 1}}]), @@ -71,6 +79,36 @@ class test_inspect(Case): self.i.active() self.assertIn('dump_active', MockMailbox.sent) + 
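The control tests in this hunk all assert against MockMailbox.sent: the mailbox's publish step is stubbed out so each broadcast command is recorded locally instead of being sent to a broker. A minimal, dependency-free sketch of that record-instead-of-publish test double follows; RecordingMailbox and Inspector are illustrative names for this sketch, not celery or kombu API (the real tests subclass kombu.pidbox.Mailbox):

    class RecordingMailbox(object):
        sent = []                          # class-level log of command names

        def _publish(self, command, *args, **kwargs):
            self.sent.append(command)      # record instead of hitting a broker

    class Inspector(object):
        def __init__(self, mailbox):
            self.mailbox = mailbox

        def clock(self):
            self.mailbox._publish('clock')

    mailbox = RecordingMailbox()
    Inspector(mailbox).clock()
    assert 'clock' in RecordingMailbox.sent

Because the log is keyed by command name, each test above reduces to "call the control method, then assert the expected command name landed in sent", which is exactly the pattern repeated for clock, conf, hello, memsample and the rest.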
@with_mock_broadcast + def test_clock(self): + self.i.clock() + self.assertIn('clock', MockMailbox.sent) + + @with_mock_broadcast + def test_conf(self): + self.i.conf() + self.assertIn('dump_conf', MockMailbox.sent) + + @with_mock_broadcast + def test_hello(self): + self.i.hello('george@vandelay.com') + self.assertIn('hello', MockMailbox.sent) + + @with_mock_broadcast + def test_memsample(self): + self.i.memsample() + self.assertIn('memsample', MockMailbox.sent) + + @with_mock_broadcast + def test_memdump(self): + self.i.memdump() + self.assertIn('memdump', MockMailbox.sent) + + @with_mock_broadcast + def test_objgraph(self): + self.i.objgraph() + self.assertIn('objgraph', MockMailbox.sent) + @with_mock_broadcast def test_scheduled(self): self.i.scheduled() @@ -112,15 +150,16 @@ class test_inspect(Case): self.assertIn('report', MockMailbox.sent) -class test_Broadcast(Case): +class test_Broadcast(AppCase): - def setUp(self): - self.app = app_or_default() + def setup(self): self.control = Control(app=self.app) self.app.control = self.control - def tearDown(self): - del(self.app.control) + @self.app.task(shared=False) + def mytask(): + pass + self.mytask = mytask def test_purge(self): self.control.purge() @@ -145,12 +184,12 @@ class test_Broadcast(Case): @with_mock_broadcast def test_rate_limit(self): - self.control.rate_limit(mytask.name, '100/m') + self.control.rate_limit(self.mytask.name, '100/m') self.assertIn('rate_limit', MockMailbox.sent) @with_mock_broadcast def test_time_limit(self): - self.control.time_limit(mytask.name, soft=10, hard=20) + self.control.time_limit(self.mytask.name, soft=10, hard=20) self.assertIn('time_limit', MockMailbox.sent) @with_mock_broadcast @@ -183,6 +222,21 @@ class test_Broadcast(Case): self.control.ping() self.assertIn('ping', MockMailbox.sent) + @with_mock_broadcast + def test_election(self): + self.control.election('some_id', 'topic', 'action') + self.assertIn('election', MockMailbox.sent) + + @with_mock_broadcast + def test_pool_grow(self): + self.control.pool_grow(2) + self.assertIn('pool_grow', MockMailbox.sent) + + @with_mock_broadcast + def test_pool_shrink(self): + self.control.pool_shrink(2) + self.assertIn('pool_shrink', MockMailbox.sent) + @with_mock_broadcast def test_revoke_from_result(self): self.app.AsyncResult('foozbazzbar').revoke() @@ -191,7 +245,7 @@ class test_Broadcast(Case): @with_mock_broadcast def test_revoke_from_resultset(self): r = self.app.GroupResult(uuid(), - map(self.app.AsyncResult, - [uuid() for i in range(10)])) + [self.app.AsyncResult(x) + for x in [uuid() for i in range(10)]]) r.revoke() self.assertIn('revoke', MockMailbox.sent) diff --git a/awx/lib/site-packages/celery/tests/app/test_defaults.py b/awx/lib/site-packages/celery/tests/app/test_defaults.py index d74c8e28d3..bf87f80ae1 100644 --- a/awx/lib/site-packages/celery/tests/app/test_defaults.py +++ b/awx/lib/site-packages/celery/tests/app/test_defaults.py @@ -1,23 +1,28 @@ from __future__ import absolute_import -from __future__ import with_statement import sys from importlib import import_module -from mock import Mock, patch -from celery.tests.utils import Case, pypy_version, sys_platform +from celery.app.defaults import NAMESPACES + +from celery.tests.case import ( + AppCase, Mock, patch, pypy_version, sys_platform, +) -class test_defaults(Case): +class test_defaults(AppCase): - def setUp(self): + def setup(self): self._prev = sys.modules.pop('celery.app.defaults', None) - def tearDown(self): + def teardown(self): if self._prev: sys.modules['celery.app.defaults'] 
= self._prev + def test_option_repr(self): + self.assertTrue(repr(NAMESPACES['BROKER']['URL'])) + def test_any(self): val = object() self.assertIs(self.defaults.Option.typemap['any'](val), val) @@ -30,11 +35,11 @@ class test_defaults(Case): def test_default_pool_pypy_15(self): with sys_platform('darwin'): with pypy_version((1, 5, 0)): - self.assertEqual(self.defaults.DEFAULT_POOL, 'processes') + self.assertEqual(self.defaults.DEFAULT_POOL, 'prefork') def test_deprecated(self): source = Mock() - source.BROKER_INSIST = True + source.CELERYD_LOG_LEVEL = 2 with patch('celery.utils.warn_deprecated') as warn: self.defaults.find_deprecated_settings(source) self.assertTrue(warn.called) diff --git a/awx/lib/site-packages/celery/tests/app/test_exceptions.py b/awx/lib/site-packages/celery/tests/app/test_exceptions.py new file mode 100644 index 0000000000..25d2b4ef81 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/app/test_exceptions.py @@ -0,0 +1,35 @@ +from __future__ import absolute_import + +import pickle + +from datetime import datetime + +from celery.exceptions import Reject, Retry + +from celery.tests.case import AppCase + + +class test_Retry(AppCase): + + def test_when_datetime(self): + x = Retry('foo', KeyError(), when=datetime.utcnow()) + self.assertTrue(x.humanize()) + + def test_pickleable(self): + x = Retry('foo', KeyError(), when=datetime.utcnow()) + self.assertTrue(pickle.loads(pickle.dumps(x))) + + +class test_Reject(AppCase): + + def test_attrs(self): + x = Reject('foo', requeue=True) + self.assertEqual(x.reason, 'foo') + self.assertTrue(x.requeue) + + def test_repr(self): + self.assertTrue(repr(Reject('foo', True))) + + def test_pickleable(self): + x = Retry('foo', True) + self.assertTrue(pickle.loads(pickle.dumps(x))) diff --git a/awx/lib/site-packages/celery/tests/app/test_loaders.py b/awx/lib/site-packages/celery/tests/app/test_loaders.py index 4e9f117062..037ef4df68 100644 --- a/awx/lib/site-packages/celery/tests/app/test_loaders.py +++ b/awx/lib/site-packages/celery/tests/app/test_loaders.py @@ -1,16 +1,12 @@ from __future__ import absolute_import -from __future__ import with_statement import os import sys - -from mock import Mock, patch +import warnings from celery import loaders -from celery.app import app_or_default from celery.exceptions import ( NotConfigured, - ImproperlyConfigured, CPendingDeprecationWarning, ) from celery.loaders import base @@ -19,23 +15,9 @@ from celery.loaders.app import AppLoader from celery.utils.imports import NotAPackage from celery.utils.mail import SendmailWarning -from celery.tests.utils import AppCase, Case -from celery.tests.compat import catch_warnings - - -class ObjectConfig(object): - FOO = 1 - BAR = 2 - -object_config = ObjectConfig() -dict_config = dict(FOO=10, BAR=20) - - -class Object(object): - - def __init__(self, **kwargs): - for k, v in kwargs.items(): - setattr(self, k, v) +from celery.tests.case import ( + AppCase, Case, Mock, depends_on_current_app, patch, with_environ, +) class DummyLoader(base.BaseLoader): @@ -47,16 +29,17 @@ class DummyLoader(base.BaseLoader): class test_loaders(AppCase): def test_get_loader_cls(self): - self.assertEqual(loaders.get_loader_cls('default'), default.Loader) + @depends_on_current_app def test_current_loader(self): with self.assertWarnsRegex( CPendingDeprecationWarning, r'deprecation'): self.assertIs(loaders.current_loader(), self.app.loader) + @depends_on_current_app def test_load_settings(self): with self.assertWarnsRegex( CPendingDeprecationWarning, @@ -64,7 +47,7 @@ class 
test_loaders(AppCase): self.assertIs(loaders.load_settings(), self.app.conf) -class test_LoaderBase(Case): +class test_LoaderBase(AppCase): message_options = {'subject': 'Subject', 'body': 'Body', 'sender': 'x@x.com', @@ -75,14 +58,32 @@ class test_LoaderBase(Case): 'password': 'qwerty', 'timeout': 3} - def setUp(self): - self.loader = DummyLoader() - self.app = app_or_default() + def setup(self): + self.loader = DummyLoader(app=self.app) def test_handlers_pass(self): self.loader.on_task_init('foo.task', 'feedface-cafebabe') self.loader.on_worker_init() + def test_now(self): + self.assertTrue(self.loader.now(utc=True)) + self.assertTrue(self.loader.now(utc=False)) + + def test_read_configuration_no_env(self): + self.assertDictEqual( + base.BaseLoader(app=self.app).read_configuration( + 'FOO_X_S_WE_WQ_Q_WE'), + {}, + ) + + def test_autodiscovery(self): + with patch('celery.loaders.base.autodiscover_tasks') as auto: + auto.return_value = [Mock()] + auto.return_value[0].__name__ = 'moo' + self.loader.autodiscover_tasks(['A', 'B']) + self.assertIn('moo', self.loader.task_modules) + self.loader.task_modules.discard('moo') + def test_import_task_module(self): self.assertEqual(sys, self.loader.import_task_module('sys')) @@ -104,15 +105,11 @@ class test_LoaderBase(Case): def test_import_default_modules(self): modnames = lambda l: [m.__name__ for m in l] - prev, self.app.conf.CELERY_IMPORTS = ( - self.app.conf.CELERY_IMPORTS, ('os', 'sys')) - try: - self.assertEqual( - sorted(modnames(self.loader.import_default_modules())), - sorted(modnames([os, sys])), - ) - finally: - self.app.conf.CELERY_IMPORTS = prev + self.app.conf.CELERY_IMPORTS = ('os', 'sys') + self.assertEqual( + sorted(modnames(self.loader.import_default_modules())), + sorted(modnames([os, sys])), + ) def test_import_from_cwd_custom_imp(self): @@ -147,7 +144,7 @@ class test_LoaderBase(Case): def test_mail_attribute(self): from celery.utils import mail - loader = base.BaseLoader() + loader = base.BaseLoader(app=self.app) self.assertIs(loader.mail, mail) def test_cmdline_config_ValueError(self): @@ -155,34 +152,32 @@ class test_LoaderBase(Case): self.loader.cmdline_config_parser(['broker.port=foobar']) -class test_DefaultLoader(Case): +class test_DefaultLoader(AppCase): @patch('celery.loaders.base.find_module') def test_read_configuration_not_a_package(self, find_module): find_module.side_effect = NotAPackage() - l = default.Loader() + l = default.Loader(app=self.app) with self.assertRaises(NotAPackage): - l.read_configuration() + l.read_configuration(fail_silently=False) @patch('celery.loaders.base.find_module') + @with_environ('CELERY_CONFIG_MODULE', 'celeryconfig.py') def test_read_configuration_py_in_name(self, find_module): - prev = os.environ['CELERY_CONFIG_MODULE'] - os.environ['CELERY_CONFIG_MODULE'] = 'celeryconfig.py' - try: - find_module.side_effect = NotAPackage() - l = default.Loader() - with self.assertRaises(NotAPackage): - l.read_configuration() - finally: - os.environ['CELERY_CONFIG_MODULE'] = prev + find_module.side_effect = NotAPackage() + l = default.Loader(app=self.app) + with self.assertRaises(NotAPackage): + l.read_configuration(fail_silently=False) @patch('celery.loaders.base.find_module') def test_read_configuration_importerror(self, find_module): default.C_WNOCONF = True find_module.side_effect = ImportError() - l = default.Loader() + l = default.Loader(app=self.app) with self.assertWarnsRegex(NotConfigured, r'make sure it exists'): - l.read_configuration() + l.read_configuration(fail_silently=True) + 
default.C_WNOCONF = False + l.read_configuration(fail_silently=True) def test_read_configuration(self): from types import ModuleType @@ -190,17 +185,18 @@ class test_DefaultLoader(Case): class ConfigModule(ModuleType): pass - celeryconfig = ConfigModule('celeryconfig') - celeryconfig.CELERY_IMPORTS = ('os', 'sys') configname = os.environ.get('CELERY_CONFIG_MODULE') or 'celeryconfig' + celeryconfig = ConfigModule(configname) + celeryconfig.CELERY_IMPORTS = ('os', 'sys') prevconfig = sys.modules.get(configname) sys.modules[configname] = celeryconfig try: - l = default.Loader() - settings = l.read_configuration() + l = default.Loader(app=self.app) + l.find_module = Mock(name='find_module') + settings = l.read_configuration(fail_silently=False) self.assertTupleEqual(settings.CELERY_IMPORTS, ('os', 'sys')) - settings = l.read_configuration() + settings = l.read_configuration(fail_silently=False) self.assertTupleEqual(settings.CELERY_IMPORTS, ('os', 'sys')) l.on_worker_init() finally: @@ -208,7 +204,7 @@ class test_DefaultLoader(Case): sys.modules[configname] = prevconfig def test_import_from_cwd(self): - l = default.Loader() + l = default.Loader(app=self.app) old_path = list(sys.path) try: sys.path.remove(os.getcwd()) @@ -232,45 +228,47 @@ class test_DefaultLoader(Case): def find_module(self, name): raise ImportError(name) - with catch_warnings(record=True): - l = _Loader() + with warnings.catch_warnings(record=True): + l = _Loader(app=self.app) self.assertFalse(l.configured) context_executed[0] = True self.assertTrue(context_executed[0]) -class test_AppLoader(Case): +class test_AppLoader(AppCase): - def setUp(self): - self.app = app_or_default() + def setup(self): self.loader = AppLoader(app=self.app) - def test_config_from_envvar(self, key='CELERY_HARNESS_CFG1'): - self.assertFalse(self.loader.config_from_envvar('HDSAJIHWIQHEWQU', - silent=True)) - with self.assertRaises(ImproperlyConfigured): - self.loader.config_from_envvar('HDSAJIHWIQHEWQU', silent=False) - os.environ[key] = __name__ + '.object_config' - self.assertTrue(self.loader.config_from_envvar(key)) - self.assertEqual(self.loader.conf['FOO'], 1) - self.assertEqual(self.loader.conf['BAR'], 2) - - os.environ[key] = 'unknown_asdwqe.asdwqewqe' - with self.assertRaises(ImportError): - self.loader.config_from_envvar(key, silent=False) - self.assertFalse(self.loader.config_from_envvar(key, silent=True)) - - os.environ[key] = __name__ + '.dict_config' - self.assertTrue(self.loader.config_from_envvar(key)) - self.assertEqual(self.loader.conf['FOO'], 10) - self.assertEqual(self.loader.conf['BAR'], 20) - def test_on_worker_init(self): - prev, self.app.conf.CELERY_IMPORTS = ( - self.app.conf.CELERY_IMPORTS, ('subprocess', )) + self.app.conf.CELERY_IMPORTS = ('subprocess', ) + sys.modules.pop('subprocess', None) + self.loader.init_worker() + self.assertIn('subprocess', sys.modules) + + +class test_autodiscovery(Case): + + def test_autodiscover_tasks(self): + base._RACE_PROTECTION = True try: - sys.modules.pop('subprocess', None) - self.loader.init_worker() - self.assertIn('subprocess', sys.modules) + base.autodiscover_tasks(['foo']) finally: - self.app.conf.CELERY_IMPORTS = prev + base._RACE_PROTECTION = False + with patch('celery.loaders.base.find_related_module') as frm: + base.autodiscover_tasks(['foo']) + self.assertTrue(frm.called) + + def test_find_related_module(self): + with patch('importlib.import_module') as imp: + with patch('imp.find_module') as find: + imp.return_value = Mock() + imp.return_value.__path__ = 'foo' + 
base.find_related_module(base, 'tasks') + + imp.side_effect = AttributeError() + base.find_related_module(base, 'tasks') + imp.side_effect = None + + find.side_effect = ImportError() + base.find_related_module(base, 'tasks') diff --git a/awx/lib/site-packages/celery/tests/app/test_log.py b/awx/lib/site-packages/celery/tests/app/test_log.py index 75cc68b990..8b6823b1fc 100644 --- a/awx/lib/site-packages/celery/tests/app/test_log.py +++ b/awx/lib/site-packages/celery/tests/app/test_log.py @@ -1,16 +1,11 @@ from __future__ import absolute_import -from __future__ import with_statement import sys import logging from tempfile import mktemp -from mock import patch, Mock -from nose import SkipTest - -from celery import current_app from celery import signals -from celery.app.log import Logging, TaskFormatter +from celery.app.log import TaskFormatter from celery.utils.log import LoggingProxy from celery.utils import uuid from celery.utils.log import ( @@ -18,15 +13,18 @@ from celery.utils.log import ( ColorFormatter, logger as base_logger, get_task_logger, + task_logger, + in_sighandler, + logger_isa, + _patch_logger_class, ) -from celery.tests.utils import ( - AppCase, Case, override_stdouts, wrap_logger, get_handlers, +from celery.tests.case import ( + AppCase, Mock, SkipTest, + get_handlers, override_stdouts, patch, wrap_logger, restore_logging, ) -log = current_app.log - -class test_TaskFormatter(Case): +class test_TaskFormatter(AppCase): def test_no_task(self): class Record(object): @@ -44,7 +42,54 @@ class test_TaskFormatter(Case): self.assertEqual(record.task_id, '???') -class test_ColorFormatter(Case): +class test_logger_isa(AppCase): + + def test_isa(self): + x = get_task_logger('Z1george') + self.assertTrue(logger_isa(x, task_logger)) + prev_x, x.parent = x.parent, None + try: + self.assertFalse(logger_isa(x, task_logger)) + finally: + x.parent = prev_x + + y = get_task_logger('Z1elaine') + y.parent = x + self.assertTrue(logger_isa(y, task_logger)) + self.assertTrue(logger_isa(y, x)) + self.assertTrue(logger_isa(y, y)) + + z = get_task_logger('Z1jerry') + z.parent = y + self.assertTrue(logger_isa(z, task_logger)) + self.assertTrue(logger_isa(z, y)) + self.assertTrue(logger_isa(z, x)) + self.assertTrue(logger_isa(z, z)) + + def test_recursive(self): + x = get_task_logger('X1foo') + prev, x.parent = x.parent, x + try: + with self.assertRaises(RuntimeError): + logger_isa(x, task_logger) + finally: + x.parent = prev + + y = get_task_logger('X2foo') + z = get_task_logger('X2foo') + prev_y, y.parent = y.parent, z + try: + prev_z, z.parent = z.parent, y + try: + with self.assertRaises(RuntimeError): + logger_isa(y, task_logger) + finally: + z.parent = prev_z + finally: + y.parent = prev_y + + +class test_ColorFormatter(AppCase): @patch('celery.utils.log.safe_str') @patch('logging.Formatter.formatException') @@ -65,6 +110,15 @@ class test_ColorFormatter(Case): if sys.version_info[0] == 2: self.assertTrue(safe_str.called) + @patch('logging.Formatter.format') + def test_format_object(self, _format): + x = ColorFormatter(object()) + x.use_color = True + record = Mock() + record.levelname = 'ERROR' + record.msg = object() + self.assertTrue(x.format(record)) + @patch('celery.utils.log.safe_str') def test_format_raises(self, safe_str): x = ColorFormatter('HELLO') @@ -93,7 +147,7 @@ class test_ColorFormatter(Case): x.format(record) self.assertIn('= 3: + raise + else: + break + + def assertRelativedelta(self, due, last_ran): + try: + from dateutil.relativedelta import relativedelta + except ImportError: + 
return + l1, d1, n1 = due.remaining_delta(last_ran) + l2, d2, n2 = due.remaining_delta(last_ran, ffwd=relativedelta) + if not isinstance(d1, relativedelta): + self.assertEqual(l1, l2) + for field, value in items(d1._fields()): + self.assertEqual(getattr(d1, field), value) + self.assertFalse(d2.years) + self.assertFalse(d2.months) + self.assertFalse(d2.days) + self.assertFalse(d2.leapdays) + self.assertFalse(d2.hours) + self.assertFalse(d2.minutes) + self.assertFalse(d2.seconds) + self.assertFalse(d2.microseconds) + + def test_every_minute_execution_is_due(self): + last_ran = self.now - timedelta(seconds=61) + due, remaining = self.every_minute.is_due(last_ran) + self.assertRelativedelta(self.every_minute, last_ran) + self.assertTrue(due) + self.seconds_almost_equal(remaining, self.next_minute, 1) + + def test_every_minute_execution_is_not_due(self): + last_ran = self.now - timedelta(seconds=self.now.second) + due, remaining = self.every_minute.is_due(last_ran) + self.assertFalse(due) + self.seconds_almost_equal(remaining, self.next_minute, 1) + + def test_execution_is_due_on_saturday(self): + # 29th of May 2010 is a saturday + with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 29, 10, 30)): + last_ran = self.now - timedelta(seconds=61) + due, remaining = self.every_minute.is_due(last_ran) + self.assertTrue(due) + self.seconds_almost_equal(remaining, self.next_minute, 1) + + def test_execution_is_due_on_sunday(self): + # 30th of May 2010 is a sunday + with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 30, 10, 30)): + last_ran = self.now - timedelta(seconds=61) + due, remaining = self.every_minute.is_due(last_ran) + self.assertTrue(due) + self.seconds_almost_equal(remaining, self.next_minute, 1) + + def test_execution_is_due_on_monday(self): + # 31st of May 2010 is a monday + with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 31, 10, 30)): + last_ran = self.now - timedelta(seconds=61) + due, remaining = self.every_minute.is_due(last_ran) + self.assertTrue(due) + self.seconds_almost_equal(remaining, self.next_minute, 1) + + def test_every_hour_execution_is_due(self): + with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 30)): + due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 6, 30)) + self.assertTrue(due) + self.assertEqual(remaining, 60 * 60) + + def test_every_hour_execution_is_not_due(self): + with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 29)): + due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 9, 30)) + self.assertFalse(due) + self.assertEqual(remaining, 60) + + def test_first_quarter_execution_is_due(self): + with patch_crontab_nowfun( + self.quarterly, datetime(2010, 5, 10, 10, 15)): + due, remaining = self.quarterly.is_due( + datetime(2010, 5, 10, 6, 30), + ) + self.assertTrue(due) + self.assertEqual(remaining, 15 * 60) + + def test_second_quarter_execution_is_due(self): + with patch_crontab_nowfun( + self.quarterly, datetime(2010, 5, 10, 10, 30)): + due, remaining = self.quarterly.is_due( + datetime(2010, 5, 10, 6, 30), + ) + self.assertTrue(due) + self.assertEqual(remaining, 15 * 60) + + def test_first_quarter_execution_is_not_due(self): + with patch_crontab_nowfun( + self.quarterly, datetime(2010, 5, 10, 10, 14)): + due, remaining = self.quarterly.is_due( + datetime(2010, 5, 10, 10, 0), + ) + self.assertFalse(due) + self.assertEqual(remaining, 60) + + def test_second_quarter_execution_is_not_due(self): + with patch_crontab_nowfun( + self.quarterly, datetime(2010, 5, 10, 10, 29)): + due, remaining = 
self.quarterly.is_due( + datetime(2010, 5, 10, 10, 15), + ) + self.assertFalse(due) + self.assertEqual(remaining, 60) + + def test_daily_execution_is_due(self): + with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 7, 30)): + due, remaining = self.daily.is_due(datetime(2010, 5, 9, 7, 30)) + self.assertTrue(due) + self.assertEqual(remaining, 24 * 60 * 60) + + def test_daily_execution_is_not_due(self): + with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 10, 30)): + due, remaining = self.daily.is_due(datetime(2010, 5, 10, 7, 30)) + self.assertFalse(due) + self.assertEqual(remaining, 21 * 60 * 60) + + def test_weekly_execution_is_due(self): + with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 6, 7, 30)): + due, remaining = self.weekly.is_due(datetime(2010, 4, 30, 7, 30)) + self.assertTrue(due) + self.assertEqual(remaining, 7 * 24 * 60 * 60) + + def test_weekly_execution_is_not_due(self): + with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 7, 10, 30)): + due, remaining = self.weekly.is_due(datetime(2010, 5, 6, 7, 30)) + self.assertFalse(due) + self.assertEqual(remaining, 6 * 24 * 60 * 60 - 3 * 60 * 60) + + def test_monthly_execution_is_due(self): + with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 13, 7, 30)): + due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) + self.assertTrue(due) + self.assertEqual(remaining, 28 * 24 * 60 * 60) + + def test_monthly_execution_is_not_due(self): + with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 9, 10, 30)): + due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) + self.assertFalse(due) + self.assertEqual(remaining, 4 * 24 * 60 * 60 - 3 * 60 * 60) + + def test_monthly_moy_execution_is_due(self): + with patch_crontab_nowfun( + self.monthly_moy, datetime(2014, 2, 26, 22, 0)): + due, remaining = self.monthly_moy.is_due( + datetime(2013, 7, 4, 10, 0), + ) + self.assertTrue(due) + self.assertEqual(remaining, 60.) + + def test_monthly_moy_execution_is_not_due(self): + raise SkipTest('unstable test') + with patch_crontab_nowfun( + self.monthly_moy, datetime(2013, 6, 28, 14, 30)): + due, remaining = self.monthly_moy.is_due( + datetime(2013, 6, 28, 22, 14), + ) + self.assertFalse(due) + attempt = ( + time.mktime(datetime(2014, 2, 26, 22, 0).timetuple()) - + time.mktime(datetime(2013, 6, 28, 14, 30).timetuple()) - + 60 * 60 + ) + self.assertEqual(remaining, attempt) + + def test_monthly_moy_execution_is_due2(self): + with patch_crontab_nowfun( + self.monthly_moy, datetime(2014, 2, 26, 22, 0)): + due, remaining = self.monthly_moy.is_due( + datetime(2013, 2, 28, 10, 0), + ) + self.assertTrue(due) + self.assertEqual(remaining, 60.) 
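The crontab assertions in this block pin the schedule's notion of "now" with patch_crontab_nowfun, whose definition lies outside this excerpt. A plausible shape for such a helper, assuming only that the schedule exposes a swappable nowfun callable (as celery's crontab does), is:

    from contextlib import contextmanager

    @contextmanager
    def patch_crontab_nowfun(schedule, retval):
        # pin the schedule's clock to a constant; restore it on exit so
        # one frozen test cannot leak into the next
        prev, schedule.nowfun = schedule.nowfun, lambda: retval
        try:
            yield
        finally:
            schedule.nowfun = prev

Freezing the clock this way is what makes the due/remaining pairs above deterministic, e.g. a daily schedule checked at 2010-05-10 07:30 against a last run of 2010-05-09 07:30 must report due with exactly 24 * 60 * 60 seconds remaining.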
+ + def test_monthly_moy_execution_is_not_due2(self): + with patch_crontab_nowfun( + self.monthly_moy, datetime(2014, 2, 26, 21, 0)): + due, remaining = self.monthly_moy.is_due( + datetime(2013, 6, 28, 22, 14), + ) + self.assertFalse(due) + attempt = 60 * 60 + self.assertEqual(remaining, attempt) + + def test_yearly_execution_is_due(self): + with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 11, 7, 30)): + due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) + self.assertTrue(due) + self.assertEqual(remaining, 364 * 24 * 60 * 60) + + def test_yearly_execution_is_not_due(self): + with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 7, 10, 30)): + due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) + self.assertFalse(due) + self.assertEqual(remaining, 4 * 24 * 60 * 60 - 3 * 60 * 60) diff --git a/awx/lib/site-packages/celery/tests/app/test_utils.py b/awx/lib/site-packages/celery/tests/app/test_utils.py index e4e5de6354..dc7e381104 100644 --- a/awx/lib/site-packages/celery/tests/app/test_utils.py +++ b/awx/lib/site-packages/celery/tests/app/test_utils.py @@ -1,25 +1,30 @@ -""" -Tests of celery.app.utils -""" - from __future__ import absolute_import +from collections import Mapping, MutableMapping -import unittest +from celery.app.utils import Settings, bugreport + +from celery.tests.case import AppCase, Mock -class TestSettings(unittest.TestCase): +class TestSettings(AppCase): """ Tests of celery.app.utils.Settings """ def test_is_mapping(self): """Settings should be a collections.Mapping""" - from celery.app.utils import Settings - from collections import Mapping self.assertTrue(issubclass(Settings, Mapping)) def test_is_mutable_mapping(self): """Settings should be a collections.MutableMapping""" - from celery.app.utils import Settings - from collections import MutableMapping self.assertTrue(issubclass(Settings, MutableMapping)) + + +class test_bugreport(AppCase): + + def test_no_conn_driver_info(self): + self.app.connection = Mock() + conn = self.app.connection.return_value = Mock() + conn.transport = None + + bugreport(self.app) diff --git a/awx/lib/site-packages/celery/tests/backends/test_amqp.py b/awx/lib/site-packages/celery/tests/backends/test_amqp.py index f855454444..30a468446f 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_amqp.py +++ b/awx/lib/site-packages/celery/tests/backends/test_amqp.py @@ -1,21 +1,23 @@ from __future__ import absolute_import -from __future__ import with_statement import pickle import socket +from contextlib import contextmanager from datetime import timedelta -from Queue import Empty, Queue +from pickle import dumps, loads + +from billiard.einfo import ExceptionInfo -from celery import current_app from celery import states -from celery.app import app_or_default from celery.backends.amqp import AMQPBackend -from celery.datastructures import ExceptionInfo from celery.exceptions import TimeoutError +from celery.five import Empty, Queue, range from celery.utils import uuid -from celery.tests.utils import AppCase, sleepdeprived, Mock +from celery.tests.case import ( + AppCase, Mock, depends_on_current_app, patch, sleepdeprived, +) class SomeClass(object): @@ -27,8 +29,8 @@ class SomeClass(object): class test_AMQPBackend(AppCase): def create_backend(self, **opts): - opts = dict(dict(serializer='pickle', persistent=False), **opts) - return AMQPBackend(**opts) + opts = dict(dict(serializer='pickle', persistent=True), **opts) + return AMQPBackend(self.app, **opts) def test_mark_as_done(self): tb1 = 
self.create_backend() @@ -42,6 +44,10 @@ class test_AMQPBackend(AppCase): self.assertTrue(tb2._cache.get(tid)) self.assertTrue(tb2.get_result(tid), 42) + @depends_on_current_app + def test_pickleable(self): + self.assertTrue(loads(dumps(self.create_backend()))) + def test_revive(self): tb = self.create_backend() tb.revive(None) @@ -65,7 +71,7 @@ class test_AMQPBackend(AppCase): tid3 = uuid() try: raise KeyError('foo') - except KeyError, exception: + except KeyError as exception: einfo = ExceptionInfo() tb1.mark_as_failure(tid3, exception, traceback=einfo.traceback) self.assertEqual(tb2.get_status(tid3), states.FAILURE) @@ -78,16 +84,6 @@ class test_AMQPBackend(AppCase): tid = uuid() self.assertEqual(repair_uuid(tid.replace('-', '')), tid) - def test_expires_defaults_to_config_deprecated_setting(self): - app = app_or_default() - prev = app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES - app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES = 10 - try: - b = self.create_backend() - self.assertEqual(b.queue_arguments.get('x-expires'), 10 * 1000.0) - finally: - app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES = prev - def test_expires_is_int(self): b = self.create_backend(expires=48) self.assertEqual(b.queue_arguments.get('x-expires'), 48 * 1000.0) @@ -111,7 +107,7 @@ class test_AMQPBackend(AppCase): iterations[0] += 1 raise KeyError('foo') - backend = AMQPBackend() + backend = AMQPBackend(self.app) from celery.app.amqp import TaskProducer prod, TaskProducer.publish = TaskProducer.publish, publish try: @@ -132,8 +128,8 @@ class test_AMQPBackend(AppCase): b = self.create_backend() self.assertState(b.get_task_meta(uuid()), states.PENDING) - def test_poll_result(self): - + @contextmanager + def _result_context(self): results = Queue() class Message(object): @@ -164,9 +160,12 @@ class test_AMQPBackend(AppCase): def declare(self): pass - def get(self, no_ack=False): + def get(self, no_ack=False, accept=None): try: - return results.get(block=False) + m = results.get(block=False) + if m: + m.accept = accept + return m except Empty: pass @@ -176,36 +175,50 @@ class test_AMQPBackend(AppCase): class MockBackend(AMQPBackend): Queue = MockBinding - backend = MockBackend() + backend = MockBackend(self.app) backend._republish = Mock() - # FFWD's to the latest state. - state_messages = [ - Message(status=states.RECEIVED, seq=1), - Message(status=states.STARTED, seq=2), - Message(status=states.FAILURE, seq=3), - ] - for state_message in state_messages: - results.put(state_message) - r1 = backend.get_task_meta(uuid()) - self.assertDictContainsSubset({'status': states.FAILURE, - 'seq': 3}, r1, - 'FFWDs to the last state') + yield results, backend, Message - # Caches last known state. - results.put(Message()) - tid = uuid() - backend.get_task_meta(tid) - self.assertIn(tid, backend._cache, 'Caches last known state') + def test_backlog_limit_exceeded(self): + with self._result_context() as (results, backend, Message): + for i in range(1001): + results.put(Message(status=states.RECEIVED)) + with self.assertRaises(backend.BacklogLimitExceeded): + backend.get_task_meta('id') - self.assertTrue(state_messages[-1].requeued) + def test_poll_result(self): + with self._result_context() as (results, backend, Message): + # FFWD's to the latest state. 
+ state_messages = [ + Message(status=states.RECEIVED, seq=1), + Message(status=states.STARTED, seq=2), + Message(status=states.FAILURE, seq=3), + ] + for state_message in state_messages: + results.put(state_message) + r1 = backend.get_task_meta(uuid()) + self.assertDictContainsSubset( + {'status': states.FAILURE, 'seq': 3}, r1, + 'FFWDs to the last state', + ) - # Returns cache if no new states. - results.queue.clear() - assert not results.qsize() - backend._cache[tid] = 'hello' - self.assertEqual(backend.get_task_meta(tid), 'hello', - 'Returns cache if no new states') + # Caches last known state. + results.put(Message()) + tid = uuid() + backend.get_task_meta(tid) + self.assertIn(tid, backend._cache, 'Caches last known state') + + self.assertTrue(state_messages[-1].requeued) + + # Returns cache if no new states. + results.queue.clear() + assert not results.qsize() + backend._cache[tid] = 'hello' + self.assertEqual( + backend.get_task_meta(tid), 'hello', + 'Returns cache if no new states', + ) def test_wait_for(self): b = self.create_backend() @@ -228,6 +241,10 @@ class test_AMQPBackend(AppCase): b.store_result(tid, KeyError('foo'), states.FAILURE) with self.assertRaises(KeyError): b.wait_for(tid, timeout=1, cache=False) + self.assertTrue(b.wait_for(tid, timeout=1, propagate=False)) + b.store_result(tid, KeyError('foo'), states.PENDING) + with self.assertRaises(TimeoutError): + b.wait_for(tid, timeout=0.01, cache=False) def test_drain_events_remaining_timeouts(self): @@ -237,7 +254,7 @@ class test_AMQPBackend(AppCase): pass b = self.create_backend() - with current_app.pool.acquire_channel(block=False) as (_, channel): + with self.app.pool.acquire_channel(block=False) as (_, channel): binding = b._create_binding(uuid()) consumer = b.Consumer(channel, binding, no_ack=True) with self.assertRaises(socket.timeout): @@ -247,7 +264,7 @@ class test_AMQPBackend(AppCase): b = self.create_backend() tids = [] - for i in xrange(10): + for i in range(10): tid = uuid() b.store_result(tid, i, states.SUCCESS) tids.append(tid) @@ -263,43 +280,54 @@ class test_AMQPBackend(AppCase): self.assertDictEqual(b._cache[res[0][0]], res[0][1]) cached_res = list(b.get_many(tids, timeout=1)) self.assertEqual(sorted(cached_res), sorted(expected_results)) + + # times out when not ready in cache (this shouldn't happen) b._cache[res[0][0]]['status'] = states.RETRY with self.assertRaises(socket.timeout): list(b.get_many(tids, timeout=0.01)) - def test_test_get_many_raises_outer_block(self): + # times out when result not yet ready + with self.assertRaises(socket.timeout): + tids = [uuid()] + b.store_result(tids[0], i, states.PENDING) + list(b.get_many(tids, timeout=0.01)) + + def test_get_many_raises_outer_block(self): class Backend(AMQPBackend): def Consumer(*args, **kwargs): raise KeyError('foo') - b = Backend() + b = Backend(self.app) with self.assertRaises(KeyError): - b.get_many(['id1']).next() + next(b.get_many(['id1'])) - def test_test_get_many_raises_inner_block(self): + def test_get_many_raises_inner_block(self): + with patch('kombu.connection.Connection.drain_events') as drain: + drain.side_effect = KeyError('foo') + b = AMQPBackend(self.app) + with self.assertRaises(KeyError): + next(b.get_many(['id1'])) - class Backend(AMQPBackend): + def test_consume_raises_inner_block(self): + with patch('kombu.connection.Connection.drain_events') as drain: - def drain_events(self, *args, **kwargs): + def se(*args, **kwargs): + drain.side_effect = ValueError() raise KeyError('foo') - - b = Backend() - with 
self.assertRaises(KeyError): - b.get_many(['id1']).next() + drain.side_effect = se + b = AMQPBackend(self.app) + with self.assertRaises(ValueError): + next(b.consume('id1')) def test_no_expires(self): b = self.create_backend(expires=None) - app = app_or_default() - prev = app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES - app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES = None - try: - b = self.create_backend(expires=None) - with self.assertRaises(KeyError): - b.queue_arguments['x-expires'] - finally: - app.conf.CELERY_AMQP_TASK_RESULT_EXPIRES = prev + app = self.app + app.conf.CELERY_TASK_RESULT_EXPIRES = None + b = self.create_backend(expires=None) + with self.assertRaises(KeyError): + b.queue_arguments['x-expires'] def test_process_cleanup(self): self.create_backend().process_cleanup() diff --git a/awx/lib/site-packages/celery/tests/backends/test_backends.py b/awx/lib/site-packages/celery/tests/backends/test_backends.py index 467ef5eecd..bba6127703 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_backends.py +++ b/awx/lib/site-packages/celery/tests/backends/test_backends.py @@ -1,43 +1,44 @@ from __future__ import absolute_import -from __future__ import with_statement -from mock import patch - -from celery import current_app from celery import backends from celery.backends.amqp import AMQPBackend from celery.backends.cache import CacheBackend -from celery.tests.utils import Case +from celery.tests.case import AppCase, depends_on_current_app, patch -class test_backends(Case): +class test_backends(AppCase): def test_get_backend_aliases(self): - expects = [('amqp', AMQPBackend), - ('cache', CacheBackend)] - for expect_name, expect_cls in expects: - self.assertIsInstance(backends.get_backend_cls(expect_name)(), - expect_cls) + expects = [('amqp://', AMQPBackend), + ('cache+memory://', CacheBackend)] + + for url, expect_cls in expects: + backend, url = backends.get_backend_by_url(url, self.app.loader) + self.assertIsInstance( + backend(app=self.app, url=url), + expect_cls, + ) def test_get_backend_cache(self): backends.get_backend_cls.clear() hits = backends.get_backend_cls.hits misses = backends.get_backend_cls.misses - self.assertTrue(backends.get_backend_cls('amqp')) + self.assertTrue(backends.get_backend_cls('amqp', self.app.loader)) self.assertEqual(backends.get_backend_cls.misses, misses + 1) - self.assertTrue(backends.get_backend_cls('amqp')) + self.assertTrue(backends.get_backend_cls('amqp', self.app.loader)) self.assertEqual(backends.get_backend_cls.hits, hits + 1) def test_unknown_backend(self): with self.assertRaises(ImportError): - backends.get_backend_cls('fasodaopjeqijwqe') + backends.get_backend_cls('fasodaopjeqijwqe', self.app.loader) + @depends_on_current_app def test_default_backend(self): - self.assertEqual(backends.default_backend, current_app.backend) + self.assertEqual(backends.default_backend, self.app.backend) def test_backend_by_url(self, url='redis://localhost/1'): from celery.backends.redis import RedisBackend - backend, url_ = backends.get_backend_by_url(url) + backend, url_ = backends.get_backend_by_url(url, self.app.loader) self.assertIs(backend, RedisBackend) self.assertEqual(url_, url) @@ -45,4 +46,4 @@ class test_backends(Case): with patch('celery.backends.symbol_by_name') as sbn: sbn.side_effect = ValueError() with self.assertRaises(ValueError): - backends.get_backend_cls('xxx.xxx:foo') + backends.get_backend_cls('xxx.xxx:foo', self.app.loader) diff --git a/awx/lib/site-packages/celery/tests/backends/test_base.py 
b/awx/lib/site-packages/celery/tests/backends/test_base.py index b04919c3b4..b86ea68b5e 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_base.py +++ b/awx/lib/site-packages/celery/tests/backends/test_base.py @@ -1,14 +1,12 @@ from __future__ import absolute_import -from __future__ import with_statement import sys import types -from mock import Mock -from nose import SkipTest +from contextlib import contextmanager -from celery import current_app -from celery.result import AsyncResult, GroupResult +from celery.exceptions import ChordError +from celery.five import items, range from celery.utils import serialization from celery.utils.serialization import subclass_exception from celery.utils.serialization import find_pickleable_exception as fnpe @@ -16,11 +14,14 @@ from celery.utils.serialization import UnpickleableExceptionWrapper from celery.utils.serialization import get_pickleable_exception as gpe from celery import states -from celery.backends.base import BaseBackend, KeyValueStoreBackend -from celery.backends.base import BaseDictBackend, DisabledBackend +from celery.backends.base import ( + BaseBackend, + KeyValueStoreBackend, + DisabledBackend, +) from celery.utils import uuid -from celery.tests.utils import Case +from celery.tests.case import AppCase, Mock, SkipTest, patch class wrapobject(object): @@ -35,10 +36,9 @@ else: Unpickleable = subclass_exception('Unpickleable', KeyError, 'foo.module') Impossible = subclass_exception('Impossible', object, 'foo.module') Lookalike = subclass_exception('Lookalike', wrapobject, 'foo.module') -b = BaseBackend() -class test_serialization(Case): +class test_serialization(AppCase): def test_create_exception_cls(self): self.assertTrue(serialization.create_exception_cls('FooError', 'm')) @@ -46,79 +46,37 @@ class test_serialization(Case): KeyError)) -class test_BaseBackend_interface(Case): +class test_BaseBackend_interface(AppCase): - def test_get_status(self): - with self.assertRaises(NotImplementedError): - b.get_status('SOMExx-N0Nex1stant-IDxx-') + def setup(self): + self.b = BaseBackend(self.app) def test__forget(self): with self.assertRaises(NotImplementedError): - b.forget('SOMExx-N0Nex1stant-IDxx-') - - def test_get_children(self): - with self.assertRaises(NotImplementedError): - b.get_children('SOMExx-N0Nex1stant-IDxx-') - - def test_store_result(self): - with self.assertRaises(NotImplementedError): - b.store_result('SOMExx-N0nex1stant-IDxx-', 42, states.SUCCESS) - - def test_mark_as_started(self): - with self.assertRaises(NotImplementedError): - b.mark_as_started('SOMExx-N0nex1stant-IDxx-') - - def test_reload_task_result(self): - with self.assertRaises(NotImplementedError): - b.reload_task_result('SOMExx-N0nex1stant-IDxx-') - - def test_reload_group_result(self): - with self.assertRaises(NotImplementedError): - b.reload_group_result('SOMExx-N0nex1stant-IDxx-') - - def test_get_result(self): - with self.assertRaises(NotImplementedError): - b.get_result('SOMExx-N0nex1stant-IDxx-') - - def test_restore_group(self): - with self.assertRaises(NotImplementedError): - b.restore_group('SOMExx-N0nex1stant-IDxx-') - - def test_delete_group(self): - with self.assertRaises(NotImplementedError): - b.delete_group('SOMExx-N0nex1stant-IDxx-') - - def test_save_group(self): - with self.assertRaises(NotImplementedError): - b.save_group('SOMExx-N0nex1stant-IDxx-', 'blergh') - - def test_get_traceback(self): - with self.assertRaises(NotImplementedError): - b.get_traceback('SOMExx-N0nex1stant-IDxx-') + self.b._forget('SOMExx-N0Nex1stant-IDxx-') def 
test_forget(self): with self.assertRaises(NotImplementedError): - b.forget('SOMExx-N0nex1stant-IDxx-') + self.b.forget('SOMExx-N0nex1stant-IDxx-') def test_on_chord_part_return(self): - b.on_chord_part_return(None) + self.b.on_chord_part_return(None) def test_on_chord_apply(self, unlock='celery.chord_unlock'): - p, current_app.tasks[unlock] = current_app.tasks.get(unlock), Mock() - try: - b.on_chord_apply('dakj221', 'sdokqweok', - result=map(AsyncResult, [1, 2, 3])) - self.assertTrue(current_app.tasks[unlock].apply_async.call_count) - finally: - current_app.tasks[unlock] = p + self.app.tasks[unlock] = Mock() + self.b.on_chord_apply( + 'dakj221', 'sdokqweok', + result=[self.app.AsyncResult(x) for x in [1, 2, 3]], + ) + self.assertTrue(self.app.tasks[unlock].apply_async.call_count) -class test_exception_pickle(Case): +class test_exception_pickle(AppCase): def test_oldstyle(self): if Oldstyle is None: raise SkipTest('py3k does not support old style classes') - self.assertIsNone(fnpe(Oldstyle())) + self.assertTrue(fnpe(Oldstyle())) def test_BaseException(self): self.assertIsNone(fnpe(Exception())) @@ -132,19 +90,22 @@ class test_exception_pickle(Case): self.assertIsNone(fnpe(Impossible())) -class test_prepare_exception(Case): +class test_prepare_exception(AppCase): + + def setup(self): + self.b = BaseBackend(self.app) def test_unpickleable(self): - x = b.prepare_exception(Unpickleable(1, 2, 'foo')) + x = self.b.prepare_exception(Unpickleable(1, 2, 'foo')) self.assertIsInstance(x, KeyError) - y = b.exception_to_python(x) + y = self.b.exception_to_python(x) self.assertIsInstance(y, KeyError) def test_impossible(self): - x = b.prepare_exception(Impossible()) + x = self.b.prepare_exception(Impossible()) self.assertIsInstance(x, UnpickleableExceptionWrapper) self.assertTrue(str(x)) - y = b.exception_to_python(x) + y = self.b.exception_to_python(x) self.assertEqual(y.__class__.__name__, 'Impossible') if sys.version_info < (2, 5): self.assertTrue(y.__class__.__module__) @@ -152,18 +113,18 @@ class test_prepare_exception(Case): self.assertEqual(y.__class__.__module__, 'foo.module') def test_regular(self): - x = b.prepare_exception(KeyError('baz')) + x = self.b.prepare_exception(KeyError('baz')) self.assertIsInstance(x, KeyError) - y = b.exception_to_python(x) + y = self.b.exception_to_python(x) self.assertIsInstance(y, KeyError) class KVBackend(KeyValueStoreBackend): mget_returns_dict = False - def __init__(self, *args, **kwargs): + def __init__(self, app, *args, **kwargs): self.db = {} - super(KVBackend, self).__init__(KeyValueStoreBackend) + super(KVBackend, self).__init__(app) def get(self, key): return self.db.get(key) @@ -181,10 +142,10 @@ class KVBackend(KeyValueStoreBackend): self.db.pop(key, None) -class DictBackend(BaseDictBackend): +class DictBackend(BaseBackend): def __init__(self, *args, **kwargs): - BaseDictBackend.__init__(self, *args, **kwargs) + BaseBackend.__init__(self, *args, **kwargs) self._data = {'can-delete': {'result': 'foo'}} def _restore_group(self, group_id): @@ -199,17 +160,17 @@ class DictBackend(BaseDictBackend): self._data.pop(group_id, None) -class test_BaseDictBackend(Case): +class test_BaseBackend_dict(AppCase): - def setUp(self): - self.b = DictBackend() + def setup(self): + self.b = DictBackend(app=self.app) def test_delete_group(self): self.b.delete_group('can-delete') self.assertNotIn('can-delete', self.b._data) def test_prepare_exception_json(self): - x = DictBackend(serializer='json') + x = DictBackend(self.app, serializer='json') e = 
x.prepare_exception(KeyError('foo')) self.assertIn('exc_type', e) e = x.exception_to_python(e) @@ -217,13 +178,13 @@ class test_BaseDictBackend(Case): self.assertEqual(str(e), "'foo'") def test_save_group(self): - b = BaseDictBackend() + b = BaseBackend(self.app) b._save_group = Mock() b.save_group('foofoo', 'xxx') b._save_group.assert_called_with('foofoo', 'xxx') def test_forget_interface(self): - b = BaseDictBackend() + b = BaseBackend(self.app) with self.assertRaises(NotImplementedError): b.forget('foo') @@ -244,11 +205,32 @@ class test_BaseDictBackend(Case): self.b.reload_task_result('task-exists') self.b._cache['task-exists'] = {'result': 'task'} + def test_fail_from_current_stack(self): + self.b.mark_as_failure = Mock() + try: + raise KeyError('foo') + except KeyError as exc: + self.b.fail_from_current_stack('task_id') + self.assertTrue(self.b.mark_as_failure.called) + args = self.b.mark_as_failure.call_args[0] + self.assertEqual(args[0], 'task_id') + self.assertIs(args[1], exc) + self.assertTrue(args[2]) -class test_KeyValueStoreBackend(Case): + def test_prepare_value_serializes_group_result(self): + g = self.app.GroupResult('group_id', [self.app.AsyncResult('foo')]) + self.assertIsInstance(self.b.prepare_value(g), (list, tuple)) - def setUp(self): - self.b = KVBackend() + def test_is_cached(self): + self.b._cache['foo'] = 1 + self.assertTrue(self.b.is_cached('foo')) + self.assertFalse(self.b.is_cached('false')) + + +class test_KeyValueStoreBackend(AppCase): + + def setup(self): + self.b = KVBackend(app=self.app) def test_on_chord_part_return(self): assert not self.b.implements_incr @@ -270,14 +252,122 @@ class test_KeyValueStoreBackend(Case): def test_get_many(self): for is_dict in True, False: self.b.mget_returns_dict = is_dict - ids = dict((uuid(), i) for i in xrange(10)) - for id, i in ids.items(): + ids = dict((uuid(), i) for i in range(10)) + for id, i in items(ids): self.b.mark_as_done(id, i) - it = self.b.get_many(ids.keys()) + it = self.b.get_many(list(ids)) for i, (got_id, got_state) in enumerate(it): self.assertEqual(got_state['result'], ids[got_id]) self.assertEqual(i, 9) - self.assertTrue(list(self.b.get_many(ids.keys()))) + self.assertTrue(list(self.b.get_many(list(ids)))) + + def test_get_many_times_out(self): + tasks = [uuid() for _ in range(4)] + self.b._cache[tasks[1]] = {'status': 'PENDING'} + with self.assertRaises(self.b.TimeoutError): + list(self.b.get_many(tasks, timeout=0.01, interval=0.01)) + + def test_chord_part_return_no_gid(self): + self.b.implements_incr = True + task = Mock() + task.request.group = None + self.b.get_key_for_chord = Mock() + self.b.get_key_for_chord.side_effect = AssertionError( + 'should not get here', + ) + self.assertIsNone(self.b.on_chord_part_return(task)) + + @contextmanager + def _chord_part_context(self, b): + + @self.app.task(shared=False) + def callback(result): + pass + + b.implements_incr = True + b.client = Mock() + with patch('celery.result.GroupResult') as GR: + deps = GR.restore.return_value = Mock() + deps.__len__ = Mock() + deps.__len__.return_value = 10 + b.incr = Mock() + b.incr.return_value = 10 + b.expire = Mock() + task = Mock() + task.request.group = 'grid' + cb = task.request.chord = callback.s() + task.request.chord.freeze() + callback.backend = b + callback.backend.fail_from_current_stack = Mock() + yield task, deps, cb + + def test_chord_part_return_propagate_set(self): + with self._chord_part_context(self.b) as (task, deps, _): + self.b.on_chord_part_return(task, propagate=True) + 
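# Aside (illustrative sketch, not part of the patch): the chord tests around
# here exercise a counter-based synchronization scheme -- every finished
# header task calls on_chord_part_return(), the backend atomically increments
# a counter, and the call whose increment reaches len(deps) joins the group
# and fires the body callback. The names below are invented for illustration;
# real backends do the increment with an atomic INCR on shared storage.

class ChordCounter(object):

    def __init__(self, size, callback):
        self.size, self.callback = size, callback
        self.count, self.results = 0, []

    def part_returned(self, result):
        # the last part to arrive (count == size) fires the body exactly once
        self.results.append(result)
        self.count += 1
        if self.count == self.size:
            self.callback(self.results)

done = []
counter = ChordCounter(3, done.extend)
for part in 'abc':
    counter.part_returned(part)
assert done == ['a', 'b', 'c']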
self.assertFalse(self.b.expire.called) + deps.delete.assert_called_with() + deps.join_native.assert_called_with(propagate=True) + + def test_chord_part_return_propagate_default(self): + with self._chord_part_context(self.b) as (task, deps, _): + self.b.on_chord_part_return(task, propagate=None) + self.assertFalse(self.b.expire.called) + deps.delete.assert_called_with() + deps.join_native.assert_called_with( + propagate=self.b.app.conf.CELERY_CHORD_PROPAGATES, + ) + + def test_chord_part_return_join_raises_internal(self): + with self._chord_part_context(self.b) as (task, deps, callback): + deps._failed_join_report = lambda: iter([]) + deps.join_native.side_effect = KeyError('foo') + self.b.on_chord_part_return(task) + self.assertTrue(self.b.fail_from_current_stack.called) + args = self.b.fail_from_current_stack.call_args + exc = args[1]['exc'] + self.assertIsInstance(exc, ChordError) + self.assertIn('foo', str(exc)) + + def test_chord_part_return_join_raises_task(self): + with self._chord_part_context(self.b) as (task, deps, callback): + deps._failed_join_report = lambda: iter([ + self.app.AsyncResult('culprit'), + ]) + deps.join_native.side_effect = KeyError('foo') + self.b.on_chord_part_return(task) + self.assertTrue(self.b.fail_from_current_stack.called) + args = self.b.fail_from_current_stack.call_args + exc = args[1]['exc'] + self.assertIsInstance(exc, ChordError) + self.assertIn('Dependency culprit raised', str(exc)) + + def test_restore_group_from_json(self): + b = KVBackend(serializer='json', app=self.app) + g = self.app.GroupResult( + 'group_id', + [self.app.AsyncResult('a'), self.app.AsyncResult('b')], + ) + b._save_group(g.id, g) + g2 = b._restore_group(g.id)['result'] + self.assertEqual(g2, g) + + def test_restore_group_from_pickle(self): + b = KVBackend(serializer='pickle', app=self.app) + g = self.app.GroupResult( + 'group_id', + [self.app.AsyncResult('a'), self.app.AsyncResult('b')], + ) + b._save_group(g.id, g) + g2 = b._restore_group(g.id)['result'] + self.assertEqual(g2, g) + + def test_chord_apply_fallback(self): + self.b.implements_incr = False + self.b.fallback_chord_unlock = Mock() + self.b.on_chord_apply('group_id', 'body', 'result', foo=1) + self.b.fallback_chord_unlock.assert_called_with( + 'group_id', 'body', 'result', foo=1, + ) def test_get_missing_meta(self): self.assertIsNone(self.b.get_result('xxx-missing')) @@ -285,10 +375,11 @@ class test_KeyValueStoreBackend(Case): def test_save_restore_delete_group(self): tid = uuid() - tsr = GroupResult(tid, [AsyncResult(uuid()) for _ in range(10)]) + tsr = self.app.GroupResult( + tid, [self.app.AsyncResult(uuid()) for _ in range(10)], + ) self.b.save_group(tid, tsr) - stored = self.b.restore_group(tid) - print(stored) + self.b.restore_group(tid) self.assertEqual(self.b.restore_group(tid), tsr) self.b.delete_group(tid) self.assertIsNone(self.b.restore_group(tid)) @@ -297,41 +388,41 @@ class test_KeyValueStoreBackend(Case): self.assertIsNone(self.b.restore_group('xxx-nonexistant')) -class test_KeyValueStoreBackend_interface(Case): +class test_KeyValueStoreBackend_interface(AppCase): def test_get(self): with self.assertRaises(NotImplementedError): - KeyValueStoreBackend().get('a') + KeyValueStoreBackend(self.app).get('a') def test_set(self): with self.assertRaises(NotImplementedError): - KeyValueStoreBackend().set('a', 1) + KeyValueStoreBackend(self.app).set('a', 1) def test_incr(self): with self.assertRaises(NotImplementedError): - KeyValueStoreBackend().incr('a') + KeyValueStoreBackend(self.app).incr('a') def 
test_cleanup(self): - self.assertFalse(KeyValueStoreBackend().cleanup()) + self.assertFalse(KeyValueStoreBackend(self.app).cleanup()) def test_delete(self): with self.assertRaises(NotImplementedError): - KeyValueStoreBackend().delete('a') + KeyValueStoreBackend(self.app).delete('a') def test_mget(self): with self.assertRaises(NotImplementedError): - KeyValueStoreBackend().mget(['a']) + KeyValueStoreBackend(self.app).mget(['a']) def test_forget(self): with self.assertRaises(NotImplementedError): - KeyValueStoreBackend().forget('a') + KeyValueStoreBackend(self.app).forget('a') -class test_DisabledBackend(Case): +class test_DisabledBackend(AppCase): def test_store_result(self): - DisabledBackend().store_result() + DisabledBackend(self.app).store_result() def test_is_disabled(self): with self.assertRaises(NotImplementedError): - DisabledBackend().get_status('foo') + DisabledBackend(self.app).get_status('foo') diff --git a/awx/lib/site-packages/celery/tests/backends/test_cache.py b/awx/lib/site-packages/celery/tests/backends/test_cache.py index a78aae554d..61eacd4b24 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_cache.py +++ b/awx/lib/site-packages/celery/tests/backends/test_cache.py @@ -1,5 +1,4 @@ from __future__ import absolute_import -from __future__ import with_statement import sys import types @@ -7,17 +6,17 @@ import types from contextlib import contextmanager from kombu.utils.encoding import str_to_bytes -from mock import Mock, patch -from celery import current_app +from celery import signature from celery import states from celery.backends.cache import CacheBackend, DummyClient from celery.exceptions import ImproperlyConfigured -from celery.result import AsyncResult -from celery.task import subtask +from celery.five import items, string, text_t from celery.utils import uuid -from celery.tests.utils import Case, mask_modules, reset_modules +from celery.tests.case import ( + AppCase, Mock, mask_modules, patch, reset_modules, +) class SomeClass(object): @@ -26,12 +25,17 @@ class SomeClass(object): self.data = data -class test_CacheBackend(Case): +class test_CacheBackend(AppCase): - def setUp(self): - self.tb = CacheBackend(backend='memory://') + def setup(self): + self.tb = CacheBackend(backend='memory://', app=self.app) self.tid = uuid() + def test_no_backend(self): + self.app.conf.CELERY_CACHE_BACKEND = None + with self.assertRaises(ImproperlyConfigured): + CacheBackend(backend=None, app=self.app) + def test_mark_as_done(self): self.assertEqual(self.tb.get_status(self.tid), states.PENDING) self.assertIsNone(self.tb.get_result(self.tid)) @@ -51,44 +55,40 @@ class test_CacheBackend(Case): def test_mark_as_failure(self): try: raise KeyError('foo') - except KeyError, exception: + except KeyError as exception: self.tb.mark_as_failure(self.tid, exception) self.assertEqual(self.tb.get_status(self.tid), states.FAILURE) self.assertIsInstance(self.tb.get_result(self.tid), KeyError) def test_on_chord_apply(self): - tb = CacheBackend(backend='memory://') - gid, res = uuid(), [AsyncResult(uuid()) for _ in xrange(3)] + tb = CacheBackend(backend='memory://', app=self.app) + gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] tb.on_chord_apply(gid, {}, result=res) - @patch('celery.result.GroupResult') - def test_on_chord_part_return(self, setresult): - tb = CacheBackend(backend='memory://') + @patch('celery.result.GroupResult.restore') + def test_on_chord_part_return(self, restore): + tb = CacheBackend(backend='memory://', app=self.app) deps = Mock() deps.__len__ = 
Mock() deps.__len__.return_value = 2 - setresult.restore.return_value = deps + restore.return_value = deps task = Mock() task.name = 'foobarbaz' - try: - current_app.tasks['foobarbaz'] = task - task.request.chord = subtask(task) + self.app.tasks['foobarbaz'] = task + task.request.chord = signature(task) - gid, res = uuid(), [AsyncResult(uuid()) for _ in xrange(3)] - task.request.group = gid - tb.on_chord_apply(gid, {}, result=res) + gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] + task.request.group = gid + tb.on_chord_apply(gid, {}, result=res) - self.assertFalse(deps.join_native.called) - tb.on_chord_part_return(task) - self.assertFalse(deps.join_native.called) + self.assertFalse(deps.join_native.called) + tb.on_chord_part_return(task) + self.assertFalse(deps.join_native.called) - tb.on_chord_part_return(task) - deps.join_native.assert_called_with(propagate=False) - deps.delete.assert_called_with() - - finally: - current_app.tasks.pop('foobarbaz') + tb.on_chord_part_return(task) + deps.join_native.assert_called_with(propagate=True) + deps.delete.assert_called_with() def test_mget(self): self.tb.set('foo', 1) @@ -99,7 +99,7 @@ class test_CacheBackend(Case): def test_forget(self): self.tb.mark_as_done(self.tid, {'foo': 'bar'}) - x = AsyncResult(self.tid, backend=self.tb) + x = self.app.AsyncResult(self.tid, backend=self.tb) x.forget() self.assertIsNone(x.result) @@ -107,12 +107,12 @@ class test_CacheBackend(Case): self.tb.process_cleanup() def test_expires_as_int(self): - tb = CacheBackend(backend='memory://', expires=10) + tb = CacheBackend(backend='memory://', expires=10, app=self.app) self.assertEqual(tb.expires, 10) def test_unknown_backend_raises_ImproperlyConfigured(self): with self.assertRaises(ImproperlyConfigured): - CacheBackend(backend='unknown://') + CacheBackend(backend='unknown://', app=self.app) class MyMemcachedStringEncodingError(Exception): @@ -122,9 +122,9 @@ class MyMemcachedStringEncodingError(Exception): class MemcachedClient(DummyClient): def set(self, key, value, *args, **kwargs): - if isinstance(key, unicode): + if isinstance(key, text_t): raise MyMemcachedStringEncodingError( - 'Keys must be str, not unicode. Convert your unicode ' + 'Keys must be bytes, not string. 
Convert your ' 'strings using mystring.encode(charset)!') return super(MemcachedClient, self).set(key, value, *args, **kwargs) @@ -157,7 +157,7 @@ class MockCacheMixin(object): sys.modules['pylibmc'] = prev -class test_get_best_memcache(Case, MockCacheMixin): +class test_get_best_memcache(AppCase, MockCacheMixin): def test_pylibmc(self): with self.mock_pylibmc(): @@ -195,11 +195,11 @@ class test_get_best_memcache(Case, MockCacheMixin): def test_backends(self): from celery.backends.cache import backends - for name, fun in backends.items(): + for name, fun in items(backends): self.assertTrue(fun()) -class test_memcache_key(Case, MockCacheMixin): +class test_memcache_key(AppCase, MockCacheMixin): def test_memcache_unicode_key(self): with self.mock_memcache(): @@ -207,8 +207,8 @@ class test_memcache_key(Case, MockCacheMixin): with mask_modules('pylibmc'): from celery.backends import cache cache._imp = [None] - task_id, result = unicode(uuid()), 42 - b = cache.CacheBackend(backend='memcache') + task_id, result = string(uuid()), 42 + b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, status=states.SUCCESS) self.assertEqual(b.get_result(task_id), result) @@ -219,7 +219,7 @@ class test_memcache_key(Case, MockCacheMixin): from celery.backends import cache cache._imp = [None] task_id, result = str_to_bytes(uuid()), 42 - b = cache.CacheBackend(backend='memcache') + b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, status=states.SUCCESS) self.assertEqual(b.get_result(task_id), result) @@ -228,8 +228,8 @@ class test_memcache_key(Case, MockCacheMixin): with self.mock_pylibmc(): from celery.backends import cache cache._imp = [None] - task_id, result = unicode(uuid()), 42 - b = cache.CacheBackend(backend='memcache') + task_id, result = string(uuid()), 42 + b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, status=states.SUCCESS) self.assertEqual(b.get_result(task_id), result) @@ -239,6 +239,6 @@ class test_memcache_key(Case, MockCacheMixin): from celery.backends import cache cache._imp = [None] task_id, result = str_to_bytes(uuid()), 42 - b = cache.CacheBackend(backend='memcache') + b = cache.CacheBackend(backend='memcache', app=self.app) b.store_result(task_id, result, status=states.SUCCESS) self.assertEqual(b.get_result(task_id), result) diff --git a/awx/lib/site-packages/celery/tests/backends/test_cassandra.py b/awx/lib/site-packages/celery/tests/backends/test_cassandra.py index 3965a61d9d..1a43be9efe 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_cassandra.py +++ b/awx/lib/site-packages/celery/tests/backends/test_cassandra.py @@ -1,15 +1,14 @@ from __future__ import absolute_import -from __future__ import with_statement import socket -from mock import Mock from pickle import loads, dumps -from celery import Celery from celery import states from celery.exceptions import ImproperlyConfigured -from celery.tests.utils import AppCase, mock_module +from celery.tests.case import ( + AppCase, Mock, mock_module, depends_on_current_app, +) class Object(object): @@ -47,6 +46,13 @@ def install_exceptions(mod): class test_CassandraBackend(AppCase): + def setup(self): + self.app.conf.update( + CASSANDRA_SERVERS=['example.com'], + CASSANDRA_KEYSPACE='keyspace', + CASSANDRA_COLUMN_FAMILY='columns', + ) + def test_init_no_pycassa(self): with mock_module('pycassa'): from celery.backends import cassandra as mod @@ -57,13 +63,6 @@ class test_CassandraBackend(AppCase): finally: mod.pycassa = 
prev - def get_app(self): - celery = Celery(set_as_current=False) - celery.conf.CASSANDRA_SERVERS = ['example.com'] - celery.conf.CASSANDRA_KEYSPACE = 'keyspace' - celery.conf.CASSANDRA_COLUMN_FAMILY = 'columns' - return celery - def test_init_with_and_without_LOCAL_QUROM(self): with mock_module('pycassa'): from celery.backends import cassandra as mod @@ -72,23 +71,25 @@ class test_CassandraBackend(AppCase): cons = mod.pycassa.ConsistencyLevel = Object() cons.LOCAL_QUORUM = 'foo' - app = self.get_app() - app.conf.CASSANDRA_READ_CONSISTENCY = 'LOCAL_FOO' - app.conf.CASSANDRA_WRITE_CONSISTENCY = 'LOCAL_FOO' + self.app.conf.CASSANDRA_READ_CONSISTENCY = 'LOCAL_FOO' + self.app.conf.CASSANDRA_WRITE_CONSISTENCY = 'LOCAL_FOO' - mod.CassandraBackend(app=app) + mod.CassandraBackend(app=self.app) cons.LOCAL_FOO = 'bar' - mod.CassandraBackend(app=app) + mod.CassandraBackend(app=self.app) # no servers raises ImproperlyConfigured with self.assertRaises(ImproperlyConfigured): - app.conf.CASSANDRA_SERVERS = None - mod.CassandraBackend(app=app, keyspace='b', column_family='c') + self.app.conf.CASSANDRA_SERVERS = None + mod.CassandraBackend( + app=self.app, keyspace='b', column_family='c', + ) + @depends_on_current_app def test_reduce(self): with mock_module('pycassa'): from celery.backends.cassandra import CassandraBackend - self.assertTrue(loads(dumps(CassandraBackend(app=self.get_app())))) + self.assertTrue(loads(dumps(CassandraBackend(app=self.app)))) def test_get_task_meta_for(self): with mock_module('pycassa'): @@ -97,8 +98,7 @@ class test_CassandraBackend(AppCase): install_exceptions(mod.pycassa) mod.Thrift = Mock() install_exceptions(mod.Thrift) - app = self.get_app() - x = mod.CassandraBackend(app=app) + x = mod.CassandraBackend(app=self.app) Get_Column = x._get_column_family = Mock() get_column = Get_Column.return_value = Mock() get = get_column.get @@ -156,8 +156,7 @@ class test_CassandraBackend(AppCase): install_exceptions(mod.pycassa) mod.Thrift = Mock() install_exceptions(mod.Thrift) - app = self.get_app() - x = mod.CassandraBackend(app=app) + x = mod.CassandraBackend(app=self.app) Get_Column = x._get_column_family = Mock() cf = Get_Column.return_value = Mock() x.detailed_mode = False @@ -172,8 +171,7 @@ class test_CassandraBackend(AppCase): def test_process_cleanup(self): with mock_module('pycassa'): from celery.backends import cassandra as mod - app = self.get_app() - x = mod.CassandraBackend(app=app) + x = mod.CassandraBackend(app=self.app) x._column_family = None x.process_cleanup() @@ -186,8 +184,7 @@ class test_CassandraBackend(AppCase): from celery.backends import cassandra as mod mod.pycassa = Mock() install_exceptions(mod.pycassa) - app = self.get_app() - x = mod.CassandraBackend(app=app) + x = mod.CassandraBackend(app=self.app) self.assertTrue(x._get_column_family()) self.assertIsNotNone(x._column_family) self.assertIs(x._get_column_family(), x._column_family) diff --git a/awx/lib/site-packages/celery/tests/backends/test_couchbase.py b/awx/lib/site-packages/celery/tests/backends/test_couchbase.py new file mode 100644 index 0000000000..96395583d9 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/backends/test_couchbase.py @@ -0,0 +1,136 @@ +from __future__ import absolute_import + +from celery.backends import couchbase as module +from celery.backends.couchbase import CouchBaseBackend +from celery.exceptions import ImproperlyConfigured +from celery import backends +from celery.tests.case import ( + AppCase, MagicMock, Mock, SkipTest, patch, sentinel, +) + +try: + import couchbase 
+except ImportError:
+    couchbase = None  # noqa
+
+COUCHBASE_BUCKET = 'celery_bucket'
+
+
+class test_CouchBaseBackend(AppCase):
+
+    def setup(self):
+        if couchbase is None:
+            raise SkipTest('couchbase is not installed.')
+        self.backend = CouchBaseBackend(app=self.app)
+
+    def test_init_no_couchbase(self):
+        """test init no couchbase raises"""
+        prev, module.couchbase = module.couchbase, None
+        try:
+            with self.assertRaises(ImproperlyConfigured):
+                CouchBaseBackend(app=self.app)
+        finally:
+            module.couchbase = prev
+
+    def test_init_no_settings(self):
+        """test init no settings"""
+        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = []
+        with self.assertRaises(ImproperlyConfigured):
+            CouchBaseBackend(app=self.app)
+
+    def test_init_settings_is_None(self):
+        """Test init settings is None"""
+        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None
+        CouchBaseBackend(app=self.app)
+
+    def test_get_connection_connection_exists(self):
+        with patch('couchbase.connection.Connection') as mock_Connection:
+            self.backend._connection = sentinel._connection
+
+            connection = self.backend._get_connection()
+
+            self.assertEqual(sentinel._connection, connection)
+            self.assertFalse(mock_Connection.called)
+
+    def test_get(self):
+        """test_get
+
+        CouchBaseBackend.get should return and take two params
+        db conn to couchbase is mocked.
+        TODO Should test on key not exists
+
+        """
+        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {}
+        x = CouchBaseBackend(app=self.app)
+        x._connection = Mock()
+        mocked_get = x._connection.get = Mock()
+        mocked_get.return_value.value = sentinel.retval
+        # should return None
+        self.assertEqual(x.get('1f3fab'), sentinel.retval)
+        x._connection.get.assert_called_once_with('1f3fab')
+
+    def test_set(self):
+        """test_set
+
+        CouchBaseBackend.set should return None and take two params
+        db conn to couchbase is mocked.
+
+        """
+        self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None
+        x = CouchBaseBackend(app=self.app)
+        x._connection = MagicMock()
+        x._connection.set = MagicMock()
+        # should return None
+        self.assertIsNone(x.set(sentinel.key, sentinel.value))
+
+    def test_delete(self):
+        """test_delete
+
+        CouchBaseBackend.delete should return and take two params
+        db conn to couchbase is mocked.
+ TODO Should test on key not exists + + """ + self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {} + x = CouchBaseBackend(app=self.app) + x._connection = Mock() + mocked_delete = x._connection.delete = Mock() + mocked_delete.return_value = None + # should return None + self.assertIsNone(x.delete('1f3fab')) + x._connection.delete.assert_called_once_with('1f3fab') + + def test_config_params(self): + """test_config_params + + celery.conf.CELERY_COUCHBASE_BACKEND_SETTINGS is properly set + """ + self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = { + 'bucket': 'mycoolbucket', + 'host': ['here.host.com', 'there.host.com'], + 'username': 'johndoe', + 'password': 'mysecret', + 'port': '1234', + } + x = CouchBaseBackend(app=self.app) + self.assertEqual(x.bucket, 'mycoolbucket') + self.assertEqual(x.host, ['here.host.com', 'there.host.com'],) + self.assertEqual(x.username, 'johndoe',) + self.assertEqual(x.password, 'mysecret') + self.assertEqual(x.port, 1234) + + def test_backend_by_url(self, url='couchbase://myhost/mycoolbucket'): + from celery.backends.couchbase import CouchBaseBackend + backend, url_ = backends.get_backend_by_url(url, self.app.loader) + self.assertIs(backend, CouchBaseBackend) + self.assertEqual(url_, url) + + def test_backend_params_by_url(self): + url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket' + with self.Celery(backend=url) as app: + x = app.backend + self.assertEqual(x.bucket, "mycoolbucket") + self.assertEqual(x.host, "myhost") + self.assertEqual(x.username, "johndoe") + self.assertEqual(x.password, "mysecret") + self.assertEqual(x.port, 123) diff --git a/awx/lib/site-packages/celery/tests/backends/test_database.py b/awx/lib/site-packages/celery/tests/backends/test_database.py index 1dec60f218..fac02215e1 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_database.py +++ b/awx/lib/site-packages/celery/tests/backends/test_database.py @@ -1,21 +1,17 @@ -from __future__ import absolute_import -from __future__ import with_statement - -import sys +from __future__ import absolute_import, unicode_literals from datetime import datetime -from nose import SkipTest from pickle import loads, dumps from celery import states -from celery.app import app_or_default from celery.exceptions import ImproperlyConfigured -from celery.result import AsyncResult from celery.utils import uuid -from celery.tests.utils import ( - Case, +from celery.tests.case import ( + AppCase, + SkipTest, + depends_on_current_app, mask_modules, skip_if_pypy, skip_if_jython, @@ -24,9 +20,9 @@ from celery.tests.utils import ( try: import sqlalchemy # noqa except ImportError: - DatabaseBackend = Task = TaskSet = None # noqa + DatabaseBackend = Task = TaskSet = retry = None # noqa else: - from celery.backends.database import DatabaseBackend + from celery.backends.database import DatabaseBackend, retry from celery.backends.database.models import Task, TaskSet @@ -36,13 +32,28 @@ class SomeClass(object): self.data = data -class test_DatabaseBackend(Case): +class test_DatabaseBackend(AppCase): @skip_if_pypy @skip_if_jython - def setUp(self): + def setup(self): if DatabaseBackend is None: raise SkipTest('sqlalchemy not installed') + self.uri = 'sqlite:///test.db' + + def test_retry_helper(self): + from celery.backends.database import OperationalError + + calls = [0] + + @retry + def raises(): + calls[0] += 1 + raise OperationalError(1, 2, 3) + + with self.assertRaises(OperationalError): + raises(max_retries=5) + self.assertEqual(calls[0], 5) def 
test_missing_SQLAlchemy_raises_ImproperlyConfigured(self): with mask_modules('sqlalchemy'): @@ -50,47 +61,26 @@ class test_DatabaseBackend(Case): with self.assertRaises(ImproperlyConfigured): _sqlalchemy_installed() - def test_pickle_hack_for_sqla_05(self): - import sqlalchemy as sa - from celery.backends.database import session - prev_base = session.ResultModelBase - prev_ver, sa.__version__ = sa.__version__, '0.5.0' - prev_models = sys.modules.pop('celery.backends.database.models', None) - try: - from sqlalchemy.ext.declarative import declarative_base - session.ResultModelBase = declarative_base() - from celery.backends.database.dfd042c7 import PickleType as Type1 - from celery.backends.database.models import PickleType as Type2 - self.assertIs(Type1, Type2) - finally: - sys.modules['celery.backends.database.models'] = prev_models - sa.__version__ = prev_ver - session.ResultModelBase = prev_base - def test_missing_dburi_raises_ImproperlyConfigured(self): - conf = app_or_default().conf - prev, conf.CELERY_RESULT_DBURI = conf.CELERY_RESULT_DBURI, None - try: - with self.assertRaises(ImproperlyConfigured): - DatabaseBackend() - finally: - conf.CELERY_RESULT_DBURI = prev + self.app.conf.CELERY_RESULT_DBURI = None + with self.assertRaises(ImproperlyConfigured): + DatabaseBackend(app=self.app) def test_missing_task_id_is_PENDING(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) self.assertEqual(tb.get_status('xxx-does-not-exist'), states.PENDING) def test_missing_task_meta_is_dict_with_pending(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) self.assertDictContainsSubset({ 'status': states.PENDING, 'task_id': 'xxx-does-not-exist-at-all', 'result': None, - 'traceback': None, + 'traceback': None }, tb.get_task_meta('xxx-does-not-exist-at-all')) def test_mark_as_done(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() @@ -102,7 +92,7 @@ class test_DatabaseBackend(Case): self.assertEqual(tb.get_result(tid), 42) def test_is_pickled(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) tid2 = uuid() result = {'foo': 'baz', 'bar': SomeClass(12345)} @@ -113,23 +103,23 @@ class test_DatabaseBackend(Case): self.assertEqual(rindb.get('bar').data, 12345) def test_mark_as_started(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() tb.mark_as_started(tid) self.assertEqual(tb.get_status(tid), states.STARTED) def test_mark_as_revoked(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() tb.mark_as_revoked(tid) self.assertEqual(tb.get_status(tid), states.REVOKED) def test_mark_as_retry(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() try: raise KeyError('foo') - except KeyError, exception: + except KeyError as exception: import traceback trace = '\n'.join(traceback.format_stack()) tb.mark_as_retry(tid, exception, traceback=trace) @@ -138,12 +128,12 @@ class test_DatabaseBackend(Case): self.assertEqual(tb.get_traceback(tid), trace) def test_mark_as_failure(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) tid3 = uuid() try: raise KeyError('foo') - except KeyError, exception: + except KeyError as exception: import traceback trace = '\n'.join(traceback.format_stack()) tb.mark_as_failure(tid3, exception, traceback=trace) @@ -152,27 +142,28 @@ class test_DatabaseBackend(Case): self.assertEqual(tb.get_traceback(tid3), trace) def test_forget(self): 
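# Aside (illustrative sketch only; celery's own `retry` helper differs in
# detail): a decorator in the spirit of test_retry_helper earlier in this
# file, retrying a callable up to `max_retries` times when a transient error
# class is raised. `OperationalError` here is a hypothetical stand-in for the
# SQLAlchemy error the real helper guards against.

class OperationalError(Exception):
    """Hypothetical stand-in for the transient database error."""


def retry(fun):
    def _inner(*args, **kwargs):
        max_retries = kwargs.pop('max_retries', 3)
        for attempt in range(max_retries):
            try:
                return fun(*args, **kwargs)
            except OperationalError:
                # out of attempts: let the error propagate to the caller
                if attempt + 1 >= max_retries:
                    raise
    return _inner

calls = [0]


@retry
def flaky():
    calls[0] += 1
    raise OperationalError()

try:
    flaky(max_retries=5)
except OperationalError:
    pass
assert calls[0] == 5  # the same call count test_retry_helper asserts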
- tb = DatabaseBackend(backend='memory://') + tb = DatabaseBackend(self.uri, backend='memory://', app=self.app) tid = uuid() tb.mark_as_done(tid, {'foo': 'bar'}) tb.mark_as_done(tid, {'foo': 'bar'}) - x = AsyncResult(tid, backend=tb) + x = self.app.AsyncResult(tid, backend=tb) x.forget() self.assertIsNone(x.result) def test_process_cleanup(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) tb.process_cleanup() + @depends_on_current_app def test_reduce(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) self.assertTrue(loads(dumps(tb))) def test_save__restore__delete_group(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) tid = uuid() - res = {u'something': 'special'} + res = {'something': 'special'} self.assertEqual(tb.save_group(tid, res), res) res2 = tb.restore_group(tid) @@ -184,7 +175,7 @@ class test_DatabaseBackend(Case): self.assertIsNone(tb.restore_group('xxx-nonexisting-id')) def test_cleanup(self): - tb = DatabaseBackend() + tb = DatabaseBackend(self.uri, app=self.app) for i in range(10): tb.mark_as_done(uuid(), 42) tb.save_group(uuid(), {'foo': 'bar'}) diff --git a/awx/lib/site-packages/celery/tests/backends/test_mongodb.py b/awx/lib/site-packages/celery/tests/backends/test_mongodb.py index 3c15ab40db..cbd80dd1bd 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_mongodb.py +++ b/awx/lib/site-packages/celery/tests/backends/test_mongodb.py @@ -1,19 +1,18 @@ from __future__ import absolute_import -from __future__ import with_statement import datetime import uuid -from mock import MagicMock, Mock, patch, sentinel -from nose import SkipTest from pickle import loads, dumps -from celery import Celery from celery import states from celery.backends import mongodb as module from celery.backends.mongodb import MongoBackend, Bunch, pymongo from celery.exceptions import ImproperlyConfigured -from celery.tests.utils import AppCase +from celery.tests.case import ( + AppCase, MagicMock, Mock, SkipTest, + depends_on_current_app, patch, sentinel, +) COLLECTION = 'taskmeta_celery' TASK_ID = str(uuid.uuid1()) @@ -27,7 +26,7 @@ MONGODB_COLLECTION = 'collection1' class test_MongoBackend(AppCase): - def setUp(self): + def setup(self): if pymongo is None: raise SkipTest('pymongo is not installed.') @@ -37,9 +36,9 @@ class test_MongoBackend(AppCase): R['Binary'], module.Binary = module.Binary, Mock() R['datetime'], datetime.datetime = datetime.datetime, Mock() - self.backend = MongoBackend() + self.backend = MongoBackend(app=self.app) - def tearDown(self): + def teardown(self): MongoBackend.encode = self._reset['encode'] MongoBackend.decode = self._reset['decode'] module.Binary = self._reset['Binary'] @@ -54,74 +53,75 @@ class test_MongoBackend(AppCase): prev, module.pymongo = module.pymongo, None try: with self.assertRaises(ImproperlyConfigured): - MongoBackend() + MongoBackend(app=self.app) finally: module.pymongo = prev def test_init_no_settings(self): - celery = Celery(set_as_current=False) - celery.conf.CELERY_MONGODB_BACKEND_SETTINGS = [] + self.app.conf.CELERY_MONGODB_BACKEND_SETTINGS = [] with self.assertRaises(ImproperlyConfigured): - MongoBackend(app=celery) + MongoBackend(app=self.app) def test_init_settings_is_None(self): - celery = Celery(set_as_current=False) - celery.conf.CELERY_MONGODB_BACKEND_SETTINGS = None - MongoBackend(app=celery) + self.app.conf.CELERY_MONGODB_BACKEND_SETTINGS = None + MongoBackend(app=self.app) def test_restore_group_no_entry(self): - x = MongoBackend() + x = 
MongoBackend(app=self.app) x.collection = Mock() fo = x.collection.find_one = Mock() fo.return_value = None self.assertIsNone(x._restore_group('1f3fab')) + @depends_on_current_app def test_reduce(self): - x = MongoBackend() + x = MongoBackend(app=self.app) self.assertTrue(loads(dumps(x))) def test_get_connection_connection_exists(self): - with patch('pymongo.connection.Connection') as mock_Connection: + with patch('pymongo.MongoClient') as mock_Connection: self.backend._connection = sentinel._connection connection = self.backend._get_connection() - self.assertEquals(sentinel._connection, connection) + self.assertEqual(sentinel._connection, connection) self.assertFalse(mock_Connection.called) def test_get_connection_no_connection_host(self): - with patch('pymongo.connection.Connection') as mock_Connection: + with patch('pymongo.MongoClient') as mock_Connection: self.backend._connection = None - self.backend.mongodb_host = MONGODB_HOST - self.backend.mongodb_port = MONGODB_PORT + self.backend.host = MONGODB_HOST + self.backend.port = MONGODB_PORT mock_Connection.return_value = sentinel.connection connection = self.backend._get_connection() mock_Connection.assert_called_once_with( - MONGODB_HOST, MONGODB_PORT, max_pool_size=10) - self.assertEquals(sentinel.connection, connection) + host='mongodb://localhost:27017', ssl=False, max_pool_size=10, + auto_start_request=False) + self.assertEqual(sentinel.connection, connection) def test_get_connection_no_connection_mongodb_uri(self): - with patch('pymongo.connection.Connection') as mock_Connection: + with patch('pymongo.MongoClient') as mock_Connection: mongodb_uri = 'mongodb://%s:%d' % (MONGODB_HOST, MONGODB_PORT) self.backend._connection = None - self.backend.mongodb_host = mongodb_uri + self.backend.host = mongodb_uri mock_Connection.return_value = sentinel.connection connection = self.backend._get_connection() mock_Connection.assert_called_once_with( - mongodb_uri, max_pool_size=10) - self.assertEquals(sentinel.connection, connection) + host=mongodb_uri, ssl=False, max_pool_size=10, + auto_start_request=False) + self.assertEqual(sentinel.connection, connection) @patch('celery.backends.mongodb.MongoBackend._get_connection') def test_get_database_no_existing(self, mock_get_connection): # Should really check for combinations of these two, to be complete. - self.backend.mongodb_user = MONGODB_USER - self.backend.mongodb_password = MONGODB_PASSWORD + self.backend.user = MONGODB_USER + self.backend.password = MONGODB_PASSWORD mock_database = Mock() mock_connection = MagicMock(spec=['__getitem__']) @@ -138,8 +138,8 @@ class test_MongoBackend(AppCase): @patch('celery.backends.mongodb.MongoBackend._get_connection') def test_get_database_no_existing_no_auth(self, mock_get_connection): # Should really check for combinations of these two, to be complete. 
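# Aside (runnable sketch using only the standard library; `Client` is a
# hypothetical stand-in for pymongo.MongoClient): the patching pattern the
# connection tests above rely on -- intercept the client constructor and
# assert the keyword arguments the backend composes from its settings.

from unittest import mock  # the external `mock` package on older Pythons


class Client(object):

    def __init__(self, host=None, max_pool_size=10):
        self.host, self.max_pool_size = host, max_pool_size


def get_connection(host='localhost', port=27017):
    # code under test: build a URI from settings and forward pool options
    return Client(host='mongodb://%s:%d' % (host, port), max_pool_size=10)

# patch target assumes this snippet is run directly as a script (__main__)
with mock.patch('__main__.Client') as MockClient:
    MockClient.return_value = mock.sentinel.connection
    connection = get_connection()
    MockClient.assert_called_once_with(
        host='mongodb://localhost:27017', max_pool_size=10,
    )
    assert connection is mock.sentinel.connection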
- self.backend.mongodb_user = None - self.backend.mongodb_password = None + self.backend.user = None + self.backend.password = None mock_database = Mock() mock_connection = MagicMock(spec=['__getitem__']) @@ -155,15 +155,15 @@ class test_MongoBackend(AppCase): def test_process_cleanup(self): self.backend._connection = None self.backend.process_cleanup() - self.assertEquals(self.backend._connection, None) + self.assertEqual(self.backend._connection, None) self.backend._connection = 'not none' self.backend.process_cleanup() - self.assertEquals(self.backend._connection, None) + self.assertEqual(self.backend._connection, None) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_store_result(self, mock_get_database): - self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION + self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() @@ -177,12 +177,12 @@ class test_MongoBackend(AppCase): mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) mock_collection.save.assert_called_once() - self.assertEquals(sentinel.result, ret_val) + self.assertEqual(sentinel.result, ret_val) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_get_task_meta_for(self, mock_get_database): datetime.datetime = self._reset['datetime'] - self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION + self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() @@ -195,14 +195,14 @@ class test_MongoBackend(AppCase): mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - self.assertEquals( + self.assertEqual( ['status', 'task_id', 'date_done', 'traceback', 'result', 'children'], - ret_val.keys()) + list(ret_val.keys())) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_get_task_meta_for_no_result(self, mock_get_database): - self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION + self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() @@ -215,11 +215,11 @@ class test_MongoBackend(AppCase): mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) - self.assertEquals({'status': states.PENDING, 'result': None}, ret_val) + self.assertEqual({'status': states.PENDING, 'result': None}, ret_val) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_save_group(self, mock_get_database): - self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION + self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() @@ -233,11 +233,11 @@ class test_MongoBackend(AppCase): mock_get_database.assert_called_once_with() mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) mock_collection.save.assert_called_once() - self.assertEquals(sentinel.result, ret_val) + self.assertEqual(sentinel.result, ret_val) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_restore_group(self, mock_get_database): - self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION + self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() @@ -252,11 +252,14 @@ class 
test_MongoBackend(AppCase): mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) mock_collection.find_one.assert_called_once_with( {'_id': sentinel.taskset_id}) - self.assertEquals(['date_done', 'result', 'task_id'], ret_val.keys()) + self.assertEqual( + ['date_done', 'result', 'task_id'], + list(ret_val.keys()), + ) @patch('celery.backends.mongodb.MongoBackend._get_database') def test_delete_group(self, mock_get_database): - self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION + self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() @@ -273,7 +276,7 @@ class test_MongoBackend(AppCase): @patch('celery.backends.mongodb.MongoBackend._get_database') def test_forget(self, mock_get_database): - self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION + self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() @@ -292,7 +295,7 @@ class test_MongoBackend(AppCase): @patch('celery.backends.mongodb.MongoBackend._get_database') def test_cleanup(self, mock_get_database): datetime.datetime = self._reset['datetime'] - self.backend.mongodb_taskmeta_collection = MONGODB_COLLECTION + self.backend.taskmeta_collection = MONGODB_COLLECTION mock_database = MagicMock(spec=['__getitem__', '__setitem__']) mock_collection = Mock() @@ -309,13 +312,13 @@ class test_MongoBackend(AppCase): mock_collection.assert_called_once() def test_get_database_authfailure(self): - x = MongoBackend() + x = MongoBackend(app=self.app) x._get_connection = Mock() conn = x._get_connection.return_value = {} - db = conn[x.mongodb_database] = Mock() + db = conn[x.database_name] = Mock() db.authenticate.return_value = False - x.mongodb_user = 'jerry' - x.mongodb_password = 'cere4l' + x.user = 'jerry' + x.password = 'cere4l' with self.assertRaises(ImproperlyConfigured): x._get_database() db.authenticate.assert_called_with('jerry', 'cere4l') diff --git a/awx/lib/site-packages/celery/tests/backends/test_redis.py b/awx/lib/site-packages/celery/tests/backends/test_redis.py index 4281ff51a8..e87df94acf 100644 --- a/awx/lib/site-packages/celery/tests/backends/test_redis.py +++ b/awx/lib/site-packages/celery/tests/backends/test_redis.py @@ -1,23 +1,20 @@ from __future__ import absolute_import -from __future__ import with_statement from datetime import timedelta -from mock import Mock, patch -from nose import SkipTest from pickle import loads, dumps from kombu.utils import cached_property, uuid -from celery import current_app +from celery import signature from celery import states from celery.datastructures import AttributeDict from celery.exceptions import ImproperlyConfigured -from celery.result import AsyncResult -from celery.task import subtask from celery.utils.timeutils import timedelta_seconds -from celery.tests.utils import Case +from celery.tests.case import ( + AppCase, Mock, SkipTest, depends_on_current_app, patch, +) class Redis(object): @@ -66,7 +63,7 @@ class redis(object): pass -class test_RedisBackend(Case): +class test_RedisBackend(AppCase): def get_backend(self): from celery.backends import redis @@ -76,7 +73,7 @@ class test_RedisBackend(Case): return RedisBackend - def setUp(self): + def setup(self): self.Backend = self.get_backend() class MockBackend(self.Backend): @@ -87,10 +84,11 @@ class test_RedisBackend(Case): self.MockBackend = MockBackend + @depends_on_current_app def test_reduce(self): try: from celery.backends.redis import 
RedisBackend - x = RedisBackend() + x = RedisBackend(app=self.app) self.assertTrue(loads(dumps(x))) except ImportError: raise SkipTest('redis not installed') @@ -98,92 +96,85 @@ class test_RedisBackend(Case): def test_no_redis(self): self.MockBackend.redis = None with self.assertRaises(ImproperlyConfigured): - self.MockBackend() + self.MockBackend(app=self.app) def test_url(self): - x = self.MockBackend('redis://foobar//1') + x = self.MockBackend('redis://foobar//1', app=self.app) self.assertEqual(x.host, 'foobar') self.assertEqual(x.db, '1') def test_conf_raises_KeyError(self): - conf = AttributeDict({'CELERY_RESULT_SERIALIZER': 'json', - 'CELERY_MAX_CACHED_RESULTS': 1, - 'CELERY_TASK_RESULT_EXPIRES': None}) - prev, current_app.conf = current_app.conf, conf - try: - self.MockBackend() - finally: - current_app.conf = prev + self.app.conf = AttributeDict({ + 'CELERY_RESULT_SERIALIZER': 'json', + 'CELERY_MAX_CACHED_RESULTS': 1, + 'CELERY_ACCEPT_CONTENT': ['json'], + 'CELERY_TASK_RESULT_EXPIRES': None, + }) + self.MockBackend(app=self.app) def test_expires_defaults_to_config(self): - conf = current_app.conf - prev = conf.CELERY_TASK_RESULT_EXPIRES - conf.CELERY_TASK_RESULT_EXPIRES = 10 - try: - b = self.Backend(expires=None) - self.assertEqual(b.expires, 10) - finally: - conf.CELERY_TASK_RESULT_EXPIRES = prev + self.app.conf.CELERY_TASK_RESULT_EXPIRES = 10 + b = self.Backend(expires=None, app=self.app) + self.assertEqual(b.expires, 10) def test_expires_is_int(self): - b = self.Backend(expires=48) + b = self.Backend(expires=48, app=self.app) self.assertEqual(b.expires, 48) def test_expires_is_None(self): - b = self.Backend(expires=None) + b = self.Backend(expires=None, app=self.app) self.assertEqual(b.expires, timedelta_seconds( - current_app.conf.CELERY_TASK_RESULT_EXPIRES)) + self.app.conf.CELERY_TASK_RESULT_EXPIRES)) def test_expires_is_timedelta(self): - b = self.Backend(expires=timedelta(minutes=1)) + b = self.Backend(expires=timedelta(minutes=1), app=self.app) self.assertEqual(b.expires, 60) def test_on_chord_apply(self): - self.Backend().on_chord_apply('group_id', {}, - result=map(AsyncResult, [1, 2, 3])) + self.Backend(app=self.app).on_chord_apply( + 'group_id', {}, + result=[self.app.AsyncResult(x) for x in [1, 2, 3]], + ) def test_mget(self): - b = self.MockBackend() + b = self.MockBackend(app=self.app) self.assertTrue(b.mget(['a', 'b', 'c'])) b.client.mget.assert_called_with(['a', 'b', 'c']) def test_set_no_expire(self): - b = self.MockBackend() + b = self.MockBackend(app=self.app) b.expires = None b.set('foo', 'bar') - @patch('celery.result.GroupResult') - def test_on_chord_part_return(self, setresult): - b = self.MockBackend() + @patch('celery.result.GroupResult.restore') + def test_on_chord_part_return(self, restore): + b = self.MockBackend(app=self.app) deps = Mock() deps.__len__ = Mock() deps.__len__.return_value = 10 - setresult.restore.return_value = deps + restore.return_value = deps b.client.incr.return_value = 1 task = Mock() task.name = 'foobarbaz' - try: - current_app.tasks['foobarbaz'] = task - task.request.chord = subtask(task) - task.request.group = 'group_id' + self.app.tasks['foobarbaz'] = task + task.request.chord = signature(task) + task.request.group = 'group_id' - b.on_chord_part_return(task) - self.assertTrue(b.client.incr.call_count) + b.on_chord_part_return(task) + self.assertTrue(b.client.incr.call_count) - b.client.incr.return_value = len(deps) - b.on_chord_part_return(task) - deps.join_native.assert_called_with(propagate=False) - 
deps.delete.assert_called_with() + b.client.incr.return_value = len(deps) + b.on_chord_part_return(task) + deps.join_native.assert_called_with(propagate=True) + deps.delete.assert_called_with() - self.assertTrue(b.client.expire.call_count) - finally: - current_app.tasks.pop('foobarbaz') + self.assertTrue(b.client.expire.call_count) def test_process_cleanup(self): - self.Backend().process_cleanup() + self.Backend(app=self.app).process_cleanup() def test_get_set_forget(self): - b = self.Backend() + b = self.Backend(app=self.app) tid = uuid() b.store_result(tid, 42, states.SUCCESS) self.assertEqual(b.get_status(tid), states.SUCCESS) @@ -192,7 +183,7 @@ class test_RedisBackend(Case): self.assertEqual(b.get_status(tid), states.PENDING) def test_set_expires(self): - b = self.Backend(expires=512) + b = self.Backend(expires=512, app=self.app) tid = uuid() key = b.get_key_for_task(tid) b.store_result(tid, 42, states.SUCCESS) diff --git a/awx/lib/site-packages/celery/tests/backends/test_rpc.py b/awx/lib/site-packages/celery/tests/backends/test_rpc.py new file mode 100644 index 0000000000..6fe594c19d --- /dev/null +++ b/awx/lib/site-packages/celery/tests/backends/test_rpc.py @@ -0,0 +1,75 @@ +from __future__ import absolute_import + +from celery.backends.rpc import RPCBackend +from celery._state import _task_stack + +from celery.tests.case import AppCase, Mock, patch + + +class test_RPCBackend(AppCase): + + def setup(self): + self.b = RPCBackend(app=self.app) + + def test_oid(self): + oid = self.b.oid + oid2 = self.b.oid + self.assertEqual(oid, oid2) + self.assertEqual(oid, self.app.oid) + + def test_interface(self): + self.b.on_reply_declare('task_id') + + def test_destination_for(self): + req = Mock(name='request') + req.reply_to = 'reply_to' + req.correlation_id = 'corid' + self.assertTupleEqual( + self.b.destination_for('task_id', req), + ('reply_to', 'corid'), + ) + task = Mock() + _task_stack.push(task) + try: + task.request.reply_to = 'reply_to' + task.request.correlation_id = 'corid' + self.assertTupleEqual( + self.b.destination_for('task_id', None), + ('reply_to', 'corid'), + ) + finally: + _task_stack.pop() + + with self.assertRaises(RuntimeError): + self.b.destination_for('task_id', None) + + def test_binding(self): + queue = self.b.binding + self.assertEqual(queue.name, self.b.oid) + self.assertEqual(queue.exchange, self.b.exchange) + self.assertEqual(queue.routing_key, self.b.oid) + self.assertFalse(queue.durable) + self.assertFalse(queue.auto_delete) + + def test_many_bindings(self): + self.assertListEqual( + self.b._many_bindings(['a', 'b']), + [self.b.binding], + ) + + def test_create_binding(self): + self.assertEqual(self.b._create_binding('id'), self.b.binding) + + def test_on_task_call(self): + with patch('celery.backends.rpc.maybe_declare') as md: + with self.app.amqp.producer_pool.acquire() as prod: + self.b.on_task_call(prod, 'task_id'), + md.assert_called_with( + self.b.binding(prod.channel), + retry=True, + ) + + def test_create_exchange(self): + ex = self.b._create_exchange('name') + self.assertIsInstance(ex, self.b.Exchange) + self.assertEqual(ex.name, '') diff --git a/awx/lib/site-packages/celery/tests/bin/proj/__init__.py b/awx/lib/site-packages/celery/tests/bin/proj/__init__.py new file mode 100644 index 0000000000..ffe8fb0693 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/bin/proj/__init__.py @@ -0,0 +1,5 @@ +from __future__ import absolute_import + +from celery import Celery + +hello = Celery(set_as_current=False) diff --git 
a/awx/lib/site-packages/celery/tests/bin/proj/app.py b/awx/lib/site-packages/celery/tests/bin/proj/app.py new file mode 100644 index 0000000000..f1fb15e2e4 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/bin/proj/app.py @@ -0,0 +1,5 @@ +from __future__ import absolute_import + +from celery import Celery + +app = Celery(set_as_current=False) diff --git a/awx/lib/site-packages/celery/tests/bin/test_camqadm.py b/awx/lib/site-packages/celery/tests/bin/test_amqp.py similarity index 80% rename from awx/lib/site-packages/celery/tests/bin/test_camqadm.py rename to awx/lib/site-packages/celery/tests/bin/test_amqp.py index b0b945a046..8840a9f102 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_camqadm.py +++ b/awx/lib/site-packages/celery/tests/bin/test_amqp.py @@ -1,26 +1,20 @@ from __future__ import absolute_import -from __future__ import with_statement -from mock import Mock, patch - -from celery import Celery -from celery.bin.camqadm import ( +from celery.bin.amqp import ( AMQPAdmin, AMQShell, dump_message, - AMQPAdminCommand, - camqadm, + amqp, main, ) -from celery.tests.utils import AppCase, WhateverIO +from celery.tests.case import AppCase, Mock, WhateverIO, patch class test_AMQShell(AppCase): def setup(self): self.fh = WhateverIO() - self.app = Celery(broker='memory://', set_as_current=False) self.adm = self.create_adm() self.shell = AMQShell(connect=self.adm.connect, out=self.fh) @@ -38,6 +32,11 @@ class test_AMQShell(AppCase): def RV(self): raise Exception(self.fh.getvalue()) + def test_spec_format_response(self): + spec = self.shell.amqp['exchange.declare'] + self.assertEqual(spec.format_response(None), 'ok.') + self.assertEqual(spec.format_response('NO'), 'NO') + def test_missing_namespace(self): self.shell.onecmd('ns.cmd arg') self.assertIn('unknown syntax', self.fh.getvalue()) @@ -54,6 +53,15 @@ class test_AMQShell(AppCase): self.shell.onecmd('help foo.baz') self.assertIn('unknown syntax', self.fh.getvalue()) + def test_onecmd_error(self): + self.shell.dispatch = Mock() + self.shell.dispatch.side_effect = MemoryError() + self.shell.say = Mock() + self.assertFalse(self.shell.needs_reconnect) + self.shell.onecmd('hello') + self.assertTrue(self.shell.say.called) + self.assertTrue(self.shell.needs_reconnect) + def test_exit(self): with self.assertRaises(SystemExit): self.shell.onecmd('exit') @@ -75,8 +83,8 @@ class test_AMQShell(AppCase): ['queue.declare'], ) self.assertEqual( - self.shell.completenames('declare'), - ['queue.declare', 'exchange.declare'], + sorted(self.shell.completenames('declare')), + sorted(['queue.declare', 'exchange.declare']), ) def test_empty_line(self): @@ -132,25 +140,14 @@ class test_AMQShell(AppCase): a.run() self.assertIn('bibi', self.fh.getvalue()) - @patch('celery.bin.camqadm.AMQPAdminCommand') + @patch('celery.bin.amqp.amqp') def test_main(self, Command): c = Command.return_value = Mock() main() c.execute_from_commandline.assert_called_with() - @patch('celery.bin.camqadm.AMQPAdmin') - def test_camqadm(self, cls): - c = cls.return_value = Mock() - camqadm() - c.run.assert_called_with() - - @patch('celery.bin.camqadm.AMQPAdmin') - def test_AMQPAdminCommand(self, cls): - c = cls.return_value = Mock() - camqadm() - c.run.assert_called_with() - - x = AMQPAdminCommand(app=self.app) + @patch('celery.bin.amqp.AMQPAdmin') + def test_command(self, cls): + x = amqp(app=self.app) x.run() self.assertIs(cls.call_args[1]['app'], self.app) - c.run.assert_called_with() diff --git a/awx/lib/site-packages/celery/tests/bin/test_base.py 
b/awx/lib/site-packages/celery/tests/bin/test_base.py index ae3f13917a..fc38d50ad5 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_base.py +++ b/awx/lib/site-packages/celery/tests/bin/test_base.py @@ -1,12 +1,16 @@ from __future__ import absolute_import -from __future__ import with_statement import os -from mock import patch - -from celery.bin.base import Command, Option -from celery.tests.utils import AppCase, override_stdouts +from celery.bin.base import ( + Command, + Option, + Extensions, + HelpFormatter, +) +from celery.tests.case import ( + AppCase, Mock, depends_on_current_app, override_stdouts, patch, +) class Object(object): @@ -14,7 +18,7 @@ class Object(object): class MyApp(object): - pass + user_options = {'preload': None} APP = MyApp() # <-- Used by test_with_custom_app @@ -22,7 +26,7 @@ APP = MyApp() # <-- Used by test_with_custom_app class MockCommand(Command): mock_args = ('arg1', 'arg2', 'arg3') - def parse_options(self, prog_name, arguments): + def parse_options(self, prog_name, arguments, command=None): options = Object() options.foo = 'bar' options.prog_name = prog_name @@ -32,6 +36,47 @@ class MockCommand(Command): return args, kwargs +class test_Extensions(AppCase): + + def test_load(self): + with patch('pkg_resources.iter_entry_points') as iterep: + with patch('celery.bin.base.symbol_by_name') as symbyname: + ep = Mock() + ep.name = 'ep' + ep.module_name = 'foo' + ep.attrs = ['bar', 'baz'] + iterep.return_value = [ep] + cls = symbyname.return_value = Mock() + register = Mock() + e = Extensions('unit', register) + e.load() + symbyname.assert_called_with('foo:bar') + register.assert_called_with(cls, name='ep') + + with patch('celery.bin.base.symbol_by_name') as symbyname: + symbyname.side_effect = SyntaxError() + with patch('warnings.warn') as warn: + e.load() + self.assertTrue(warn.called) + + with patch('celery.bin.base.symbol_by_name') as symbyname: + symbyname.side_effect = KeyError('foo') + with self.assertRaises(KeyError): + e.load() + + +class test_HelpFormatter(AppCase): + + def test_format_epilog(self): + f = HelpFormatter() + self.assertTrue(f.format_epilog('hello')) + self.assertFalse(f.format_epilog('')) + + def test_format_description(self): + f = HelpFormatter() + self.assertTrue(f.format_description('hello')) + + class test_Command(AppCase): def test_get_options(self): @@ -39,6 +84,48 @@ class test_Command(AppCase): cmd.option_list = (1, 2, 3) self.assertTupleEqual(cmd.get_options(), (1, 2, 3)) + def test_custom_description(self): + + class C(Command): + description = 'foo' + + c = C() + self.assertEqual(c.description, 'foo') + + def test_register_callbacks(self): + c = Command(on_error=8, on_usage_error=9) + self.assertEqual(c.on_error, 8) + self.assertEqual(c.on_usage_error, 9) + + def test_run_raises_UsageError(self): + cb = Mock() + c = Command(on_usage_error=cb) + c.verify_args = Mock() + c.run = Mock() + exc = c.run.side_effect = c.UsageError('foo', status=3) + + self.assertEqual(c(), exc.status) + cb.assert_called_with(exc) + c.verify_args.assert_called_with(()) + + def test_default_on_usage_error(self): + cmd = Command() + cmd.handle_error = Mock() + exc = Exception() + cmd.on_usage_error(exc) + cmd.handle_error.assert_called_with(exc) + + def test_verify_args_missing(self): + c = Command() + + def run(a, b, c): + pass + c.run = run + + with self.assertRaises(c.UsageError): + c.verify_args((1, )) + c.verify_args((1, 2, 3)) + def test_run_interface(self): with self.assertRaises(NotImplementedError): Command().run() @@ -48,10 +135,9 @@ 
class test_Command(AppCase): cmd = Command() with self.assertRaises(SystemExit): cmd.early_version(['--version']) - stdout.write.assert_called_with(cmd.version + '\n') def test_execute_from_commandline(self): - cmd = MockCommand() + cmd = MockCommand(app=self.app) args1, kwargs1 = cmd.execute_from_commandline() # sys.argv self.assertTupleEqual(args1, cmd.mock_args) self.assertDictContainsSubset({'foo': 'bar'}, kwargs1) @@ -62,18 +148,18 @@ class test_Command(AppCase): kwargs2) def test_with_bogus_args(self): - cmd = MockCommand() - cmd.supports_args = False with override_stdouts() as (_, stderr): + cmd = MockCommand(app=self.app) + cmd.supports_args = False with self.assertRaises(SystemExit): cmd.execute_from_commandline(argv=['--bogus']) - self.assertTrue(stderr.getvalue()) - self.assertIn('Unrecognized', stderr.getvalue()) + self.assertTrue(stderr.getvalue()) + self.assertIn('Unrecognized', stderr.getvalue()) def test_with_custom_config_module(self): prev = os.environ.pop('CELERY_CONFIG_MODULE', None) try: - cmd = MockCommand() + cmd = MockCommand(app=self.app) cmd.setup_app_from_commandline(['--config=foo.bar.baz']) self.assertEqual(os.environ.get('CELERY_CONFIG_MODULE'), 'foo.bar.baz') @@ -86,7 +172,7 @@ class test_Command(AppCase): def test_with_custom_broker(self): prev = os.environ.pop('CELERY_BROKER_URL', None) try: - cmd = MockCommand() + cmd = MockCommand(app=self.app) cmd.setup_app_from_commandline(['--broker=xyzza://']) self.assertEqual( os.environ.get('CELERY_BROKER_URL'), 'xyzza://', @@ -98,30 +184,115 @@ class test_Command(AppCase): os.environ.pop('CELERY_BROKER_URL', None) def test_with_custom_app(self): - cmd = MockCommand() + cmd = MockCommand(app=self.app) app = '.'.join([__name__, 'APP']) cmd.setup_app_from_commandline(['--app=%s' % (app, ), '--loglevel=INFO']) self.assertIs(cmd.app, APP) + cmd.setup_app_from_commandline(['-A', app, + '--loglevel=INFO']) + self.assertIs(cmd.app, APP) - def test_with_cmdline_config(self): - cmd = MockCommand() + def test_setup_app_sets_quiet(self): + cmd = MockCommand(app=self.app) + cmd.setup_app_from_commandline(['-q']) + self.assertTrue(cmd.quiet) + cmd2 = MockCommand(app=self.app) + cmd2.setup_app_from_commandline(['--quiet']) + self.assertTrue(cmd2.quiet) + + def test_setup_app_sets_chdir(self): + with patch('os.chdir') as chdir: + cmd = MockCommand(app=self.app) + cmd.setup_app_from_commandline(['--workdir=/opt']) + chdir.assert_called_with('/opt') + + def test_setup_app_sets_loader(self): + prev = os.environ.get('CELERY_LOADER') try: - cmd.enable_config_from_cmdline = True - cmd.namespace = 'celeryd' - rest = cmd.setup_app_from_commandline(argv=[ - '--loglevel=INFO', '--', - 'broker.url=amqp://broker.example.com', - '.prefetch_multiplier=100']) - self.assertEqual(cmd.app.conf.BROKER_URL, - 'amqp://broker.example.com') - self.assertEqual(cmd.app.conf.CELERYD_PREFETCH_MULTIPLIER, 100) - self.assertListEqual(rest, ['--loglevel=INFO']) + cmd = MockCommand(app=self.app) + cmd.setup_app_from_commandline(['--loader=X.Y:Z']) + self.assertEqual(os.environ['CELERY_LOADER'], 'X.Y:Z') finally: - cmd.app.conf.BROKER_URL = 'memory://' + if prev is not None: + os.environ['CELERY_LOADER'] = prev + + def test_setup_app_no_respect(self): + cmd = MockCommand(app=self.app) + cmd.respects_app_option = False + with patch('celery.bin.base.Celery') as cp: + cmd.setup_app_from_commandline(['--app=x.y:z']) + self.assertTrue(cp.called) + + def test_setup_app_custom_app(self): + cmd = MockCommand(app=self.app) + app = cmd.app = Mock() + app.user_options = 
{'preload': None} + cmd.setup_app_from_commandline([]) + self.assertEqual(cmd.app, app) + + def test_find_app_suspects(self): + cmd = MockCommand(app=self.app) + self.assertTrue(cmd.find_app('celery.tests.bin.proj.app')) + self.assertTrue(cmd.find_app('celery.tests.bin.proj')) + self.assertTrue(cmd.find_app('celery.tests.bin.proj:hello')) + self.assertTrue(cmd.find_app('celery.tests.bin.proj.app:app')) + + with self.assertRaises(AttributeError): + cmd.find_app(__name__) + + def test_simple_format(self): + cmd = MockCommand(app=self.app) + with patch('socket.gethostname') as hn: + hn.return_value = 'blacktron.example.com' + self.assertEqual(cmd.simple_format(''), '') + self.assertEqual( + cmd.simple_format('celery@%h'), + 'celery@blacktron.example.com', + ) + self.assertEqual( + cmd.simple_format('celery@%d'), + 'celery@example.com', + ) + self.assertEqual( + cmd.simple_format('celery@%n'), + 'celery@blacktron', + ) + + def test_say_chat_quiet(self): + cmd = MockCommand(app=self.app) + cmd.quiet = True + self.assertIsNone(cmd.say_chat('<-', 'foo', 'foo')) + + def test_say_chat_show_body(self): + cmd = MockCommand(app=self.app) + cmd.out = Mock() + cmd.show_body = True + cmd.say_chat('->', 'foo', 'body') + cmd.out.assert_called_with('body') + + def test_say_chat_no_body(self): + cmd = MockCommand(app=self.app) + cmd.out = Mock() + cmd.show_body = False + cmd.say_chat('->', 'foo', 'body') + + @depends_on_current_app + def test_with_cmdline_config(self): + cmd = MockCommand(app=self.app) + cmd.enable_config_from_cmdline = True + cmd.namespace = 'celeryd' + rest = cmd.setup_app_from_commandline(argv=[ + '--loglevel=INFO', '--', + 'broker.url=amqp://broker.example.com', + '.prefetch_multiplier=100']) + self.assertEqual(cmd.app.conf.BROKER_URL, + 'amqp://broker.example.com') + self.assertEqual(cmd.app.conf.CELERYD_PREFETCH_MULTIPLIER, 100) + self.assertListEqual(rest, ['--loglevel=INFO']) def test_find_app(self): - cmd = MockCommand() + cmd = MockCommand(app=self.app) with patch('celery.bin.base.symbol_by_name') as sbn: from types import ModuleType x = ModuleType('proj') diff --git a/awx/lib/site-packages/celery/tests/bin/test_celerybeat.py b/awx/lib/site-packages/celery/tests/bin/test_beat.py similarity index 61% rename from awx/lib/site-packages/celery/tests/bin/test_celerybeat.py rename to awx/lib/site-packages/celery/tests/bin/test_beat.py index 5fe35d3e89..45a74389a4 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_celerybeat.py +++ b/awx/lib/site-packages/celery/tests/bin/test_beat.py @@ -1,21 +1,17 @@ from __future__ import absolute_import -from __future__ import with_statement import logging import sys from collections import defaultdict -from kombu.tests.utils import redirect_stdouts -from mock import patch - from celery import beat from celery import platforms -from celery.app import app_or_default -from celery.bin import celerybeat as celerybeat_bin +from celery.bin import beat as beat_bin from celery.apps import beat as beatapp -from celery.tests.utils import AppCase +from celery.tests.case import AppCase, Mock, patch, restore_logging +from kombu.tests.case import redirect_stdouts class MockedShelveModule(object): @@ -62,22 +58,32 @@ class MockBeat3(beatapp.Beat): class test_Beat(AppCase): def test_loglevel_string(self): - b = beatapp.Beat(loglevel='DEBUG') + b = beatapp.Beat(app=self.app, loglevel='DEBUG', + redirect_stdouts=False) self.assertEqual(b.loglevel, logging.DEBUG) - b2 = beatapp.Beat(loglevel=logging.DEBUG) + b2 = beatapp.Beat(app=self.app, loglevel=logging.DEBUG, 
+ redirect_stdouts=False) self.assertEqual(b2.loglevel, logging.DEBUG) + def test_colorize(self): + self.app.log.setup = Mock() + b = beatapp.Beat(app=self.app, no_color=True, + redirect_stdouts=False) + b.setup_logging() + self.assertTrue(self.app.log.setup.called) + self.assertEqual(self.app.log.setup.call_args[1]['colorize'], False) + def test_init_loader(self): - b = beatapp.Beat() + b = beatapp.Beat(app=self.app, redirect_stdouts=False) b.init_loader() def test_process_title(self): - b = beatapp.Beat() + b = beatapp.Beat(app=self.app, redirect_stdouts=False) b.set_process_title() def test_run(self): - b = MockBeat2() + b = MockBeat2(app=self.app, redirect_stdouts=False) MockService.started = False b.run() self.assertTrue(MockService.started) @@ -98,8 +104,8 @@ class test_Beat(AppCase): platforms.signals = p def test_install_sync_handler(self): - b = beatapp.Beat() - clock = MockService() + b = beatapp.Beat(app=self.app, redirect_stdouts=False) + clock = MockService(app=self.app) MockService.in_sync = False handlers = self.psig(b.install_sync_handler, clock) with self.assertRaises(SystemExit): @@ -108,29 +114,34 @@ class test_Beat(AppCase): MockService.in_sync = False def test_setup_logging(self): - try: - # py3k - delattr(sys.stdout, 'logger') - except AttributeError: - pass - b = beatapp.Beat() - b.redirect_stdouts = False - b.app.log.__class__._setup = False - b.setup_logging() - with self.assertRaises(AttributeError): - sys.stdout.logger + with restore_logging(): + try: + # py3k + delattr(sys.stdout, 'logger') + except AttributeError: + pass + b = beatapp.Beat(app=self.app, redirect_stdouts=False) + b.redirect_stdouts = False + b.app.log.already_setup = False + b.setup_logging() + with self.assertRaises(AttributeError): + sys.stdout.logger @redirect_stdouts @patch('celery.apps.beat.logger') def test_logs_errors(self, logger, stdout, stderr): - b = MockBeat3(socket_timeout=None) - b.start_scheduler() - self.assertTrue(logger.critical.called) + with restore_logging(): + b = MockBeat3( + app=self.app, redirect_stdouts=False, socket_timeout=None, + ) + b.start_scheduler() + self.assertTrue(logger.critical.called) @redirect_stdouts @patch('celery.platforms.create_pidlock') def test_use_pidfile(self, create_pidlock, stdout, stderr): - b = MockBeat2(pidfile='pidfilelockfilepid', socket_timeout=None) + b = MockBeat2(app=self.app, pidfile='pidfilelockfilepid', + socket_timeout=None, redirect_stdouts=False) b.start_scheduler() self.assertTrue(create_pidlock.called) @@ -156,8 +167,9 @@ class test_div(AppCase): def setup(self): self.prev, beatapp.Beat = beatapp.Beat, MockBeat - self.ctx, celerybeat_bin.detached = ( - celerybeat_bin.detached, MockDaemonContext) + self.ctx, beat_bin.detached = ( + beat_bin.detached, MockDaemonContext, + ) def teardown(self): beatapp.Beat = self.prev @@ -165,20 +177,20 @@ class test_div(AppCase): def test_main(self): sys.argv = [sys.argv[0], '-s', 'foo'] try: - celerybeat_bin.main() + beat_bin.main(app=self.app) self.assertTrue(MockBeat.running) finally: MockBeat.running = False def test_detach(self): - cmd = celerybeat_bin.BeatCommand() - cmd.app = app_or_default() + cmd = beat_bin.beat() + cmd.app = self.app cmd.run(detach=True) self.assertTrue(MockDaemonContext.opened) self.assertTrue(MockDaemonContext.closed) def test_parse_options(self): - cmd = celerybeat_bin.BeatCommand() - cmd.app = app_or_default() - options, args = cmd.parse_options('celerybeat', ['-s', 'foo']) + cmd = beat_bin.beat() + cmd.app = self.app + options, args = cmd.parse_options('celery 
beat', ['-s', 'foo']) self.assertEqual(options.schedule, 'foo') diff --git a/awx/lib/site-packages/celery/tests/bin/test_celery.py b/awx/lib/site-packages/celery/tests/bin/test_celery.py index 9fc462b7da..a89d3f036c 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_celery.py +++ b/awx/lib/site-packages/celery/tests/bin/test_celery.py @@ -1,36 +1,79 @@ from __future__ import absolute_import -from __future__ import with_statement + +import sys from anyjson import dumps from datetime import datetime -from mock import Mock, patch -from celery import task +from celery import __main__ from celery.platforms import EX_FAILURE, EX_USAGE, EX_OK +from celery.bin.base import Error from celery.bin.celery import ( Command, - Error, - worker, list_, call, purge, result, inspect, + control, status, migrate, help, report, CeleryCommand, determine_exit_status, - main, + multi, + main as mainfun, + _RemoteControl, + command, ) -from celery.tests.utils import AppCase, WhateverIO +from celery.tests.case import ( + AppCase, Mock, WhateverIO, override_stdouts, patch, +) -@task() -def add(x, y): - return x + y +class test__main__(AppCase): + + def test_warn_deprecated(self): + with override_stdouts() as (stdout, _): + __main__._warn_deprecated('YADDA YADDA') + self.assertIn('command is deprecated', stdout.getvalue()) + self.assertIn('YADDA YADDA', stdout.getvalue()) + + def test_main(self): + with patch('celery.__main__.maybe_patch_concurrency') as mpc: + with patch('celery.bin.celery.main') as main: + __main__.main() + mpc.assert_called_with() + main.assert_called_with() + + def test_compat_worker(self): + with patch('celery.__main__.maybe_patch_concurrency') as mpc: + with patch('celery.__main__._warn_deprecated') as depr: + with patch('celery.bin.worker.main') as main: + __main__._compat_worker() + mpc.assert_called_with() + depr.assert_called_with('celery worker') + main.assert_called_with() + + def test_compat_multi(self): + with patch('celery.__main__.maybe_patch_concurrency') as mpc: + with patch('celery.__main__._warn_deprecated') as depr: + with patch('celery.bin.multi.main') as main: + __main__._compat_multi() + mpc.assert_called_with() + depr.assert_called_with('celery multi') + main.assert_called_with() + + def test_compat_beat(self): + with patch('celery.__main__.maybe_patch_concurrency') as mpc: + with patch('celery.__main__._warn_deprecated') as depr: + with patch('celery.bin.beat.main') as main: + __main__._compat_beat() + mpc.assert_called_with() + depr.assert_called_with('celery beat') + main.assert_called_with() class test_Command(AppCase): @@ -46,13 +89,6 @@ class test_Command(AppCase): self.err = WhateverIO() self.cmd = Command(self.app, stdout=self.out, stderr=self.err) - def test_show_help(self): - self.cmd.run_from_argv = Mock() - self.assertEqual(self.cmd.show_help('foo'), EX_USAGE) - self.cmd.run_from_argv.assert_called_with( - self.cmd.prog_name, ['foo', '--help'] - ) - def test_error(self): self.cmd.out = Mock() self.cmd.error('FOO') @@ -61,52 +97,42 @@ class test_Command(AppCase): def test_out(self): f = Mock() self.cmd.out('foo', f) - f.write.assert_called_with('foo\n') - self.cmd.out('foo\n', f) def test_call(self): - self.cmd.run = Mock() - self.cmd.run.return_value = None + + def ok_run(): + pass + + self.cmd.run = ok_run self.assertEqual(self.cmd(), EX_OK) - self.cmd.run.side_effect = Error('error', EX_FAILURE) + def error_run(): + raise Error('error', EX_FAILURE) + self.cmd.run = error_run self.assertEqual(self.cmd(), EX_FAILURE) def test_run_from_argv(self): with 
self.assertRaises(NotImplementedError): self.cmd.run_from_argv('prog', ['foo', 'bar']) - self.assertEqual(self.cmd.prog_name, 'prog') - def test_prettify_list(self): - self.assertEqual(self.cmd.prettify([])[1], '- empty -') - self.assertIn('bar', self.cmd.prettify(['foo', 'bar'])[1]) + def test_pretty_list(self): + self.assertEqual(self.cmd.pretty([])[1], '- empty -') + self.assertIn('bar', self.cmd.pretty(['foo', 'bar'])[1]) - def test_prettify_dict(self): + def test_pretty_dict(self): self.assertIn( 'OK', - str(self.cmd.prettify({'ok': 'the quick brown fox'})[0]), + str(self.cmd.pretty({'ok': 'the quick brown fox'})[0]), ) self.assertIn( 'ERROR', - str(self.cmd.prettify({'error': 'the quick brown fox'})[0]), + str(self.cmd.pretty({'error': 'the quick brown fox'})[0]), ) - def test_prettify(self): - self.assertIn('OK', str(self.cmd.prettify('the quick brown'))) - self.assertIn('OK', str(self.cmd.prettify(object()))) - self.assertIn('OK', str(self.cmd.prettify({'foo': 'bar'}))) - - -class test_Delegate(AppCase): - - def test_get_options(self): - self.assertTrue(worker(app=self.app).get_options()) - - def test_run(self): - w = worker() - w.target.run = Mock() - w.run() - w.target.run.assert_called_with() + def test_pretty(self): + self.assertIn('OK', str(self.cmd.pretty('the quick brown'))) + self.assertIn('OK', str(self.cmd.pretty(object()))) + self.assertIn('OK', str(self.cmd.pretty({'foo': 'bar'}))) class test_list(AppCase): @@ -131,28 +157,35 @@ class test_list(AppCase): class test_call(AppCase): + def setup(self): + + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + @patch('celery.app.base.Celery.send_task') def test_run(self, send_task): a = call(app=self.app, stderr=WhateverIO(), stdout=WhateverIO()) - a.run('tasks.add') + a.run(self.add.name) self.assertTrue(send_task.called) - a.run('tasks.add', + a.run(self.add.name, args=dumps([4, 4]), kwargs=dumps({'x': 2, 'y': 2})) self.assertEqual(send_task.call_args[1]['args'], [4, 4]) self.assertEqual(send_task.call_args[1]['kwargs'], {'x': 2, 'y': 2}) - a.run('tasks.add', expires=10, countdown=10) + a.run(self.add.name, expires=10, countdown=10) self.assertEqual(send_task.call_args[1]['expires'], 10) self.assertEqual(send_task.call_args[1]['countdown'], 10) now = datetime.now() iso = now.isoformat() - a.run('tasks.add', expires=iso) + a.run(self.add.name, expires=iso) self.assertEqual(send_task.call_args[1]['expires'], now) with self.assertRaises(ValueError): - a.run('tasks.add', expires='foobaribazibar') + a.run(self.add.name, expires='foobaribazibar') class test_purge(AppCase): @@ -172,17 +205,28 @@ class test_purge(AppCase): class test_result(AppCase): - @patch('celery.result.AsyncResult.get') - def test_run(self, get): - out = WhateverIO() - r = result(app=self.app, stdout=out) - get.return_value = 'Jerry' - r.run('id') - self.assertIn('Jerry', out.getvalue()) + def setup(self): - get.return_value = 'Elaine' - r.run('id', task=add.name) - self.assertIn('Elaine', out.getvalue()) + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + + def test_run(self): + with patch('celery.result.AsyncResult.get') as get: + out = WhateverIO() + r = result(app=self.app, stdout=out) + get.return_value = 'Jerry' + r.run('id') + self.assertIn('Jerry', out.getvalue()) + + get.return_value = 'Elaine' + r.run('id', task=self.add.name) + self.assertIn('Elaine', out.getvalue()) + + with patch('celery.result.AsyncResult.traceback') as tb: + r.run('id', task=self.add.name, traceback=True) + 
self.assertIn(str(tb), out.getvalue()) class test_status(AppCase): @@ -208,7 +252,7 @@ class test_migrate(AppCase): def test_run(self, migrate_tasks): out = WhateverIO() m = migrate(app=self.app, stdout=out, stderr=WhateverIO()) - with self.assertRaises(SystemExit): + with self.assertRaises(TypeError): m.run() self.assertFalse(migrate_tasks.called) @@ -260,6 +304,28 @@ class test_CeleryCommand(AppCase): with self.assertRaises(SystemExit): x.execute_from_commandline() + x.respects_app_option = True + with self.assertRaises(SystemExit): + x.execute_from_commandline(['celery', 'multi']) + self.assertFalse(x.respects_app_option) + x.respects_app_option = True + with self.assertRaises(SystemExit): + x.execute_from_commandline(['manage.py', 'celery', 'multi']) + self.assertFalse(x.respects_app_option) + + def test_with_pool_option(self): + x = CeleryCommand(app=self.app) + self.assertIsNone(x.with_pool_option(['celery', 'events'])) + self.assertTrue(x.with_pool_option(['celery', 'worker'])) + self.assertTrue(x.with_pool_option(['manage.py', 'celery', 'worker'])) + + def test_load_extensions_no_commands(self): + with patch('celery.bin.celery.Extensions') as Ext: + ext = Ext.return_value = Mock(name='Extension') + ext.load.return_value = None + x = CeleryCommand(app=self.app) + x.load_extension_commands() + def test_determine_exit_status(self): self.assertEqual(determine_exit_status('true'), EX_OK) self.assertEqual(determine_exit_status(''), EX_FAILURE) @@ -286,18 +352,49 @@ class test_CeleryCommand(AppCase): Help = x.commands['help'] = Mock() help = Help.return_value = Mock() x.execute('fooox', ['a']) - help.run_from_argv.assert_called_with(x.prog_name, ['help']) + help.run_from_argv.assert_called_with(x.prog_name, [], command='help') help.reset() x.execute('help', ['help']) - help.run_from_argv.assert_called_with(x.prog_name, ['help']) + help.run_from_argv.assert_called_with(x.prog_name, [], command='help') Dummy = x.commands['dummy'] = Mock() dummy = Dummy.return_value = Mock() dummy.run_from_argv.side_effect = Error('foo', status='EX_FAILURE') help.reset() x.execute('dummy', ['dummy']) - dummy.run_from_argv.assert_called_with(x.prog_name, ['dummy']) - help.run_from_argv.assert_called_with(x.prog_name, ['dummy']) + dummy.run_from_argv.assert_called_with( + x.prog_name, [], command='dummy', + ) + help.run_from_argv.assert_called_with( + x.prog_name, [], command='help', + ) + + exc = dummy.run_from_argv.side_effect = x.UsageError('foo') + x.on_usage_error = Mock() + x.execute('dummy', ['dummy']) + x.on_usage_error.assert_called_with(exc) + + def test_on_usage_error(self): + x = CeleryCommand(app=self.app) + x.error = Mock() + x.on_usage_error(x.UsageError('foo'), command=None) + self.assertTrue(x.error.called) + x.on_usage_error(x.UsageError('foo'), command='dummy') + + def test_prepare_prog_name(self): + x = CeleryCommand(app=self.app) + main = Mock(name='__main__') + main.__file__ = '/opt/foo.py' + with patch.dict(sys.modules, __main__=main): + self.assertEqual(x.prepare_prog_name('__main__.py'), '/opt/foo.py') + self.assertEqual(x.prepare_prog_name('celery'), 'celery') + + +class test_RemoteControl(AppCase): + + def test_call_interface(self): + with self.assertRaises(NotImplementedError): + _RemoteControl(app=self.app).call() class test_inspect(AppCase): @@ -305,6 +402,47 @@ class test_inspect(AppCase): def test_usage(self): self.assertTrue(inspect(app=self.app).usage('foo')) + def test_command_info(self): + i = inspect(app=self.app) + self.assertTrue(i.get_command_info( + 'ping', help=True, 
color=i.colored.red, + )) + + def test_list_commands_color(self): + i = inspect(app=self.app) + self.assertTrue(i.list_commands( + help=True, color=i.colored.red, + )) + self.assertTrue(i.list_commands( + help=False, color=None, + )) + + def test_epilog(self): + self.assertTrue(inspect(app=self.app).epilog) + + def test_do_call_method_sql_transport_type(self): + self.app.connection = Mock() + conn = self.app.connection.return_value = Mock(name='Connection') + conn.transport.driver_type = 'sql' + i = inspect(app=self.app) + with self.assertRaises(i.Error): + i.do_call_method(['ping']) + + def test_say_directions(self): + i = inspect(self.app) + i.out = Mock() + i.quiet = True + i.say_chat('<-', 'hello out') + self.assertFalse(i.out.called) + + i.say_chat('->', 'hello in') + self.assertTrue(i.out.called) + + i.quiet = False + i.out.reset_mock() + i.say_chat('<-', 'hello out', 'body') + self.assertTrue(i.out.called) + @patch('celery.app.control.Control.inspect') def test_run(self, real): out = WhateverIO() @@ -334,14 +472,101 @@ class test_inspect(AppCase): out.seek(0) out.truncate() i.quiet = True - i.say('<-', 'hello') + i.say_chat('<-', 'hello') self.assertFalse(out.getvalue()) +class test_control(AppCase): + + def control(self, patch_call, *args, **kwargs): + kwargs.setdefault('app', Mock(name='app')) + c = control(*args, **kwargs) + if patch_call: + c.call = Mock(name='control.call') + return c + + def test_call(self): + i = self.control(False) + i.call('foo', 1, kw=2) + i.app.control.foo.assert_called_with(1, kw=2, reply=True) + + def test_pool_grow(self): + i = self.control(True) + i.pool_grow('pool_grow', n=2) + i.call.assert_called_with('pool_grow', 2) + + def test_pool_shrink(self): + i = self.control(True) + i.pool_shrink('pool_shrink', n=2) + i.call.assert_called_with('pool_shrink', 2) + + def test_autoscale(self): + i = self.control(True) + i.autoscale('autoscale', max=3, min=2) + i.call.assert_called_with('autoscale', 3, 2) + + def test_rate_limit(self): + i = self.control(True) + i.rate_limit('rate_limit', 'proj.add', '1/s') + i.call.assert_called_with('rate_limit', 'proj.add', '1/s', reply=True) + + def test_time_limit(self): + i = self.control(True) + i.time_limit('time_limit', 'proj.add', 10, 30) + i.call.assert_called_with('time_limit', 'proj.add', 10, 30, reply=True) + + def test_add_consumer(self): + i = self.control(True) + i.add_consumer( + 'add_consumer', 'queue', 'exchange', 'topic', 'rkey', + durable=True, + ) + i.call.assert_called_with( + 'add_consumer', 'queue', 'exchange', 'topic', 'rkey', + durable=True, reply=True, + ) + + def test_cancel_consumer(self): + i = self.control(True) + i.cancel_consumer('cancel_consumer', 'queue') + i.call.assert_called_with('cancel_consumer', 'queue', reply=True) + + +class test_multi(AppCase): + + def test_get_options(self): + self.assertTupleEqual(multi(app=self.app).get_options(), ()) + + def test_run_from_argv(self): + with patch('celery.bin.multi.MultiTool') as MultiTool: + m = MultiTool.return_value = Mock() + multi(self.app).run_from_argv('celery', ['arg'], command='multi') + m.execute_from_commandline.assert_called_with( + ['multi', 'arg'], 'celery', + ) + + class test_main(AppCase): @patch('celery.bin.celery.CeleryCommand') def test_main(self, Command): - command = Command.return_value = Mock() - main() - command.execute_from_commandline.assert_called_with(None) + cmd = Command.return_value = Mock() + mainfun() + cmd.execute_from_commandline.assert_called_with(None) + + @patch('celery.bin.celery.CeleryCommand') + def 
test_main_KeyboardInterrupt(self, Command): + cmd = Command.return_value = Mock() + cmd.execute_from_commandline.side_effect = KeyboardInterrupt() + mainfun() + cmd.execute_from_commandline.assert_called_with(None) + + +class test_compat(AppCase): + + def test_compat_command_decorator(self): + with patch('celery.bin.celery.CeleryCommand') as CC: + self.assertEqual(command(), CC.register_command) + fun = Mock(name='fun') + command(fun) + CC.register_command.assert_called_with(fun) diff --git a/awx/lib/site-packages/celery/tests/bin/test_celeryd_detach.py b/awx/lib/site-packages/celery/tests/bin/test_celeryd_detach.py index d0d46c79ec..2b6e5ae8d4 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_celeryd_detach.py +++ b/awx/lib/site-packages/celery/tests/bin/test_celeryd_detach.py @@ -1,20 +1,17 @@ from __future__ import absolute_import -from __future__ import with_statement -from mock import Mock, patch - -from celery import current_app +from celery.platforms import IS_WINDOWS from celery.bin.celeryd_detach import ( detach, detached_celeryd, main, ) -from celery.tests.utils import Case, override_stdouts +from celery.tests.case import AppCase, Mock, override_stdouts, patch -if not current_app.IS_WINDOWS: - class test_detached(Case): +if not IS_WINDOWS: + class test_detached(AppCase): @patch('celery.bin.celeryd_detach.detached') @patch('os.execv') @@ -33,17 +30,17 @@ if not current_app.IS_WINDOWS: execv.side_effect = Exception('foo') r = detach('/bin/boo', ['a', 'b', 'c'], - logfile='/var/log', pidfile='/var/pid') + logfile='/var/log', pidfile='/var/pid', app=self.app) context.__enter__.assert_called_with() self.assertTrue(logger.critical.called) setup_logs.assert_called_with('ERROR', '/var/log') self.assertEqual(r, 1) -class test_PartialOptionParser(Case): +class test_PartialOptionParser(AppCase): def test_parser(self): - x = detached_celeryd() + x = detached_celeryd(self.app) p = x.Parser('celeryd_detach') options, values = p.parse_args(['--logfile=foo', '--fake', '--enable', 'a', 'b', '-c1', '-d', '2']) @@ -65,13 +62,13 @@ class test_PartialOptionParser(Case): p.get_option('--logfile').nargs = 1 -class test_Command(Case): +class test_Command(AppCase): argv = ['--autoscale=10,2', '-c', '1', '--logfile=/var/log', '-lDEBUG', '--', '.disable_rate_limits=1'] def test_parse_options(self): - x = detached_celeryd() + x = detached_celeryd(app=self.app) o, v, l = x.parse_options('cd', self.argv) self.assertEqual(o.logfile, '/var/log') self.assertEqual(l, ['--autoscale=10,2', '-c', '1', @@ -82,19 +79,22 @@ class test_Command(Case): @patch('sys.exit') @patch('celery.bin.celeryd_detach.detach') def test_execute_from_commandline(self, detach, exit): - x = detached_celeryd() + x = detached_celeryd(app=self.app) x.execute_from_commandline(self.argv) self.assertTrue(exit.called) detach.assert_called_with( path=x.execv_path, uid=None, gid=None, umask=0, fake=False, logfile='/var/log', pidfile='celeryd.pid', - argv=['-m', 'celery.bin.celeryd', '-c', '1', '-lDEBUG', - '--logfile=/var/log', '--pidfile=celeryd.pid', - '--', '.disable_rate_limits=1'], + argv=x.execv_argv + [ + '-c', '1', '-lDEBUG', + '--logfile=/var/log', '--pidfile=celeryd.pid', + '--', '.disable_rate_limits=1' + ], + app=self.app, ) @patch('celery.bin.celeryd_detach.detached_celeryd') def test_main(self, command): c = command.return_value = Mock() - main() + main(self.app) c.execute_from_commandline.assert_called_with() diff --git a/awx/lib/site-packages/celery/tests/bin/test_celeryevdump.py 
b/awx/lib/site-packages/celery/tests/bin/test_celeryevdump.py index b04f85a60f..09cdc4d1ff 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_celeryevdump.py +++ b/awx/lib/site-packages/celery/tests/bin/test_celeryevdump.py @@ -1,6 +1,5 @@ from __future__ import absolute_import -from mock import patch from time import time from celery.events.dumper import ( @@ -9,12 +8,12 @@ from celery.events.dumper import ( evdump, ) -from celery.tests.utils import Case, WhateverIO +from celery.tests.case import AppCase, Mock, WhateverIO, patch -class test_Dumper(Case): +class test_Dumper(AppCase): - def setUp(self): + def setup(self): self.out = WhateverIO() self.dumper = Dumper(out=self.out) @@ -24,12 +23,12 @@ class test_Dumper(Case): def test_format_task_event(self): self.dumper.format_task_event( - 'worker.example.com', time(), 'task-started', 'tasks.add', {}) + 'worker@example.com', time(), 'task-started', 'tasks.add', {}) self.assertTrue(self.out.getvalue()) def test_on_event(self): event = { - 'hostname': 'worker.example.com', + 'hostname': 'worker@example.com', 'timestamp': time(), 'uuid': '1ef', 'name': 'tasks.add', @@ -44,4 +43,26 @@ class test_Dumper(Case): @patch('celery.events.EventReceiver.capture') def test_evdump(self, capture): capture.side_effect = KeyboardInterrupt() - evdump() + evdump(app=self.app) + + def test_evdump_error_handler(self): + app = Mock(name='app') + with patch('celery.events.dumper.Dumper') as Dumper: + Dumper.return_value = Mock(name='dumper') + recv = app.events.Receiver.return_value = Mock() + + def se(*_a, **_k): + recv.capture.side_effect = SystemExit() + raise KeyError() + recv.capture.side_effect = se + + Conn = app.connection.return_value = Mock(name='conn') + conn = Conn.clone.return_value = Mock(name='cloned_conn') + conn.connection_errors = (KeyError, ) + conn.channel_errors = () + + evdump(app) + self.assertTrue(conn.ensure_connection.called) + errback = conn.ensure_connection.call_args[0][0] + errback(KeyError(), 1) + self.assertTrue(conn.as_uri.called) diff --git a/awx/lib/site-packages/celery/tests/bin/test_celeryev.py b/awx/lib/site-packages/celery/tests/bin/test_events.py similarity index 53% rename from awx/lib/site-packages/celery/tests/bin/test_celeryev.py rename to awx/lib/site-packages/celery/tests/bin/test_events.py index 02313970c3..a6e79f75af 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_celeryev.py +++ b/awx/lib/site-packages/celery/tests/bin/test_events.py @@ -1,13 +1,8 @@ from __future__ import absolute_import -from __future__ import with_statement -from nose import SkipTest -from mock import patch as mpatch +from celery.bin import events -from celery.app import app_or_default -from celery.bin import celeryev - -from celery.tests.utils import Case, patch +from celery.tests.case import AppCase, SkipTest, patch, _old_patch class MockCommand(object): @@ -22,17 +17,17 @@ def proctitle(prog, info=None): proctitle.last = () -class test_EvCommand(Case): +class test_events(AppCase): - def setUp(self): - self.app = app_or_default() - self.ev = celeryev.EvCommand(app=self.app) + def setup(self): + self.ev = events.events(app=self.app) - @patch('celery.events.dumper', 'evdump', lambda **kw: 'me dumper, you?') - @patch('celery.bin.celeryev', 'set_process_title', proctitle) + @_old_patch('celery.events.dumper', 'evdump', + lambda **kw: 'me dumper, you?') + @_old_patch('celery.bin.events', 'set_process_title', proctitle) def test_run_dump(self): self.assertEqual(self.ev.run(dump=True), 'me dumper, you?') - self.assertIn('celeryev:dump', 
proctitle.last[0]) + self.assertIn('celery events:dump', proctitle.last[0]) def test_run_top(self): try: @@ -40,15 +35,17 @@ class test_EvCommand(Case): except ImportError: raise SkipTest('curses monitor requires curses') - @patch('celery.events.cursesmon', 'evtop', lambda **kw: 'me top, you?') - @patch('celery.bin.celeryev', 'set_process_title', proctitle) + @_old_patch('celery.events.cursesmon', 'evtop', + lambda **kw: 'me top, you?') + @_old_patch('celery.bin.events', 'set_process_title', proctitle) def _inner(): self.assertEqual(self.ev.run(), 'me top, you?') - self.assertIn('celeryev:top', proctitle.last[0]) + self.assertIn('celery events:top', proctitle.last[0]) return _inner() - @patch('celery.events.snapshot', 'evcam', lambda *a, **k: (a, k)) - @patch('celery.bin.celeryev', 'set_process_title', proctitle) + @_old_patch('celery.events.snapshot', 'evcam', + lambda *a, **k: (a, k)) + @_old_patch('celery.bin.events', 'set_process_title', proctitle) def test_run_cam(self): a, kw = self.ev.run(camera='foo.bar.baz', logfile='logfile') self.assertEqual(a[0], 'foo.bar.baz') @@ -56,12 +53,12 @@ class test_EvCommand(Case): self.assertIsNone(kw['maxrate']) self.assertEqual(kw['loglevel'], 'INFO') self.assertEqual(kw['logfile'], 'logfile') - self.assertIn('celeryev:cam', proctitle.last[0]) + self.assertIn('celery events:cam', proctitle.last[0]) - @mpatch('celery.events.snapshot.evcam') - @mpatch('celery.bin.celeryev.detached') + @patch('celery.events.snapshot.evcam') + @patch('celery.bin.events.detached') def test_run_cam_detached(self, detached, evcam): - self.ev.prog_name = 'celeryev' + self.ev.prog_name = 'celery events' self.ev.run_evcam('myapp.Camera', detach=True) self.assertTrue(detached.called) self.assertTrue(evcam.called) @@ -69,8 +66,8 @@ class test_EvCommand(Case): def test_get_options(self): self.assertTrue(self.ev.get_options()) - @patch('celery.bin.celeryev', 'EvCommand', MockCommand) + @_old_patch('celery.bin.events', 'events', MockCommand) def test_main(self): MockCommand.executed = [] - celeryev.main() + events.main() self.assertTrue(MockCommand.executed) diff --git a/awx/lib/site-packages/celery/tests/bin/test_celeryd_multi.py b/awx/lib/site-packages/celery/tests/bin/test_multi.py similarity index 74% rename from awx/lib/site-packages/celery/tests/bin/test_celeryd_multi.py rename to awx/lib/site-packages/celery/tests/bin/test_multi.py index bb03a6b36b..0b2ecd9815 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_celeryd_multi.py +++ b/awx/lib/site-packages/celery/tests/bin/test_multi.py @@ -1,13 +1,10 @@ from __future__ import absolute_import -from __future__ import with_statement import errno import signal import sys -from mock import Mock, patch - -from celery.bin.celeryd_multi import ( +from celery.bin.multi import ( main, MultiTool, findsig, @@ -20,10 +17,10 @@ from celery.bin.celeryd_multi import ( __doc__ as doc, ) -from celery.tests.utils import Case, WhateverIO +from celery.tests.case import AppCase, Mock, WhateverIO, SkipTest, patch -class test_functions(Case): +class test_functions(AppCase): def test_findsig(self): self.assertEqual(findsig(['a', 'b', 'c', '-1']), 1) @@ -58,7 +55,7 @@ class test_functions(Case): self.assertEqual(quote("the 'quick"), "'the '\\''quick'") -class test_NamespacedOptionParser(Case): +class test_NamespacedOptionParser(AppCase): def test_parse(self): x = NamespacedOptionParser(['-c:1,3', '4']) @@ -77,7 +74,7 @@ class test_NamespacedOptionParser(Case): self.assertEqual(x.passthrough, '-- .disable_rate_limits=1') -class 
test_multi_args(Case): +class test_multi_args(AppCase): @patch('socket.gethostname') def test_parse(self, gethostname): @@ -92,32 +89,37 @@ class test_multi_args(Case): it = multi_args(p, cmd='COMMAND', append='*AP*', prefix='*P*', suffix='*S*') names = list(it) - self.assertEqual( - names[0][0:2], - ('*P*jerry*S*', [ - 'COMMAND', '-n *P*jerry*S*', '-Q bar', - '-c 5', '--flag', '--logfile=foo', - '-- .disable_rate_limits=1', '*AP*', - ]), + + def assert_line_in(name, args): + self.assertIn(name, [tup[0] for tup in names]) + argv = None + for item in names: + if item[0] == name: + argv = item[1] + self.assertTrue(argv) + for arg in args: + self.assertIn(arg, argv) + + assert_line_in( + '*P*jerry@*S*', + ['COMMAND', '-n *P*jerry@*S*', '-Q bar', + '-c 5', '--flag', '--logfile=foo', + '-- .disable_rate_limits=1', '*AP*'], ) - self.assertEqual( - names[1][0:2], - ('*P*elaine*S*', [ - 'COMMAND', '-n *P*elaine*S*', '-Q bar', - '-c 5', '--flag', '--logfile=foo', - '-- .disable_rate_limits=1', '*AP*', - ]), + assert_line_in( + '*P*elaine@*S*', + ['COMMAND', '-n *P*elaine@*S*', '-Q bar', + '-c 5', '--flag', '--logfile=foo', + '-- .disable_rate_limits=1', '*AP*'], ) - self.assertEqual( - names[2][0:2], - ('*P*kramer*S*', [ - 'COMMAND', '--loglevel=DEBUG', '-n *P*kramer*S*', - '-Q bar', '--flag', '--logfile=foo', - '-- .disable_rate_limits=1', '*AP*', - ]), + assert_line_in( + '*P*kramer@*S*', + ['COMMAND', '--loglevel=DEBUG', '-n *P*kramer@*S*', + '-Q bar', '--flag', '--logfile=foo', + '-- .disable_rate_limits=1', '*AP*'], ) expand = names[0][2] - self.assertEqual(expand('%h'), '*P*jerry*S*') + self.assertEqual(expand('%h'), '*P*jerry@*S*') self.assertEqual(expand('%n'), 'jerry') names2 = list(multi_args(p, cmd='COMMAND', append='', prefix='*P*', suffix='*S*')) @@ -129,34 +131,36 @@ class test_multi_args(Case): self.assertEqual(len(names3), 10) self.assertEqual( names3[0][0:2], - ('celery1.example.com', - ['COMMAND', '-n celery1.example.com', '-c 5', '']), + ('celery1@example.com', + ['COMMAND', '-n celery1@example.com', '-c 5', '']), ) for i, worker in enumerate(names3[1:]): self.assertEqual( worker[0:2], - ('celery%s.example.com' % (i + 2), - ['COMMAND', '-n celery%s.example.com' % (i + 2), '']), + ('celery%s@example.com' % (i + 2), + ['COMMAND', '-n celery%s@example.com' % (i + 2), '']), ) names4 = list(multi_args(p2, cmd='COMMAND', suffix='""')) self.assertEqual(len(names4), 10) self.assertEqual( names4[0][0:2], - ('celery1', ['COMMAND', '-n celery1', '-c 5', '']), + ('celery1@', + ['COMMAND', '-n celery1@', '-c 5', '']), ) - p3 = NamespacedOptionParser(['foo', '-c:foo', '5']) + p3 = NamespacedOptionParser(['foo@', '-c:foo', '5']) names5 = list(multi_args(p3, cmd='COMMAND', suffix='""')) self.assertEqual( names5[0][0:2], - ('foo', ['COMMAND', '-n foo', '-c 5', '']), + ('foo@', + ['COMMAND', '-n foo@', '-c 5', '']), ) -class test_MultiTool(Case): +class test_MultiTool(AppCase): - def setUp(self): + def setup(self): self.fh = WhateverIO() self.env = {} self.t = MultiTool(env=self.env, fh=self.fh) @@ -193,7 +197,7 @@ class test_MultiTool(Case): self.assertEqual(self.t.retcode, 1) - @patch('celery.bin.celeryd_multi.Popen') + @patch('celery.bin.multi.Popen') def test_waitexec(self, Popen): self.t.note = Mock() pipe = Popen.return_value = Mock() @@ -205,7 +209,8 @@ class test_MultiTool(Case): pipe.wait.return_value = 2 self.assertEqual(self.t.waitexec(['-m', 'foo'], 'path'), 2) self.t.note.assert_called_with( - '* Child terminated with failure code 2') + '* Child terminated with errorcode 2', + ) 
pipe.wait.return_value = 0 self.assertFalse(self.t.waitexec(['-m', 'foo', 'path'])) @@ -218,7 +223,7 @@ class test_MultiTool(Case): def test_splash(self): self.t.nosplash = False self.t.splash() - self.assertIn('celeryd-multi', self.fh.getvalue()) + self.assertIn('celery multi', self.fh.getvalue()) def test_usage(self): self.t.usage() @@ -236,7 +241,7 @@ class test_MultiTool(Case): def test_restart(self): stop = self.t._stop_nodes = Mock() - self.t.restart(['jerry', 'george'], 'celeryd') + self.t.restart(['jerry', 'george'], 'celery worker') waitexec = self.t.waitexec = Mock() self.assertTrue(stop.called) callback = stop.call_args[1]['callback'] @@ -257,13 +262,15 @@ class test_MultiTool(Case): self.t.getpids = Mock() self.t.getpids.return_value = [2, 3, 4] self.t.shutdown_nodes = Mock() - self.t.stop(['a', 'b', '-INT'], 'celeryd') + self.t.stop(['a', 'b', '-INT'], 'celery worker') self.t.shutdown_nodes.assert_called_with( [2, 3, 4], sig=signal.SIGINT, retry=None, callback=None, ) def test_kill(self): + if not hasattr(signal, 'SIGKILL'): + raise SkipTest('SIGKILL not supported by this platform') self.t.getpids = Mock() self.t.getpids.return_value = [ ('a', None, 10), @@ -272,7 +279,7 @@ class test_MultiTool(Case): ] sig = self.t.signal_node = Mock() - self.t.kill(['a', 'b', 'c'], 'celeryd') + self.t.kill(['a', 'b', 'c'], 'celery worker') sigs = sig.call_args_list self.assertEqual(len(sigs), 3) @@ -288,13 +295,13 @@ class test_MultiTool(Case): def read_pid(self): try: - return {'celeryd@foo.pid': 10, - 'celeryd@bar.pid': 11}[self.path] + return {'foo.pid': 10, + 'bar.pid': 11}[self.path] except KeyError: raise ValueError() Pidfile.side_effect = pids - @patch('celery.bin.celeryd_multi.Pidfile') + @patch('celery.bin.multi.Pidfile') @patch('socket.gethostname') def test_getpids(self, gethostname, Pidfile): gethostname.return_value = 'e.com' @@ -302,29 +309,39 @@ class test_MultiTool(Case): callback = Mock() p = NamespacedOptionParser(['foo', 'bar', 'baz']) - nodes = self.t.getpids(p, 'celeryd', callback=callback) - self.assertEqual(nodes, [ - ('foo.e.com', - ('celeryd', '--pidfile=celeryd@foo.pid', '-n foo.e.com', ''), - 10), - ('bar.e.com', - ('celeryd', '--pidfile=celeryd@bar.pid', '-n bar.e.com', ''), - 11), - ]) - self.assertTrue(callback.called) - callback.assert_called_with( - 'baz.e.com', - ['celeryd', '--pidfile=celeryd@baz.pid', '-n baz.e.com', ''], - None, + nodes = self.t.getpids(p, 'celery worker', callback=callback) + node_0, node_1 = nodes + self.assertEqual(node_0[0], 'foo@e.com') + self.assertEqual( + sorted(node_0[1]), + sorted(('celery worker', '--pidfile=foo.pid', + '-n foo@e.com', '')), ) + self.assertEqual(node_0[2], 10) + + self.assertEqual(node_1[0], 'bar@e.com') + self.assertEqual( + sorted(node_1[1]), + sorted(('celery worker', '--pidfile=bar.pid', + '-n bar@e.com', '')), + ) + self.assertEqual(node_1[2], 11) + self.assertTrue(callback.called) + cargs, _ = callback.call_args + self.assertEqual(cargs[0], 'baz@e.com') + self.assertItemsEqual( + cargs[1], + ['celery worker', '--pidfile=baz.pid', '-n baz@e.com', ''], + ) + self.assertIsNone(cargs[2]) self.assertIn('DOWN', self.fh.getvalue()) # without callback, should work - nodes = self.t.getpids(p, 'celeryd', callback=None) + nodes = self.t.getpids(p, 'celery worker', callback=None) - @patch('celery.bin.celeryd_multi.Pidfile') + @patch('celery.bin.multi.Pidfile') @patch('socket.gethostname') - @patch('celery.bin.celeryd_multi.sleep') + @patch('celery.bin.multi.sleep') def test_shutdown_nodes(self, slepp, gethostname, 
Pidfile): gethostname.return_value = 'e.com' self.prepare_pidfile_for_getpids(Pidfile) @@ -334,14 +351,20 @@ class test_MultiTool(Case): self.t.node_alive.return_value = False callback = Mock() - self.t.stop(['foo', 'bar', 'baz'], 'celeryd', callback=callback) - sigs = self.t.signal_node.call_args_list + self.t.stop(['foo', 'bar', 'baz'], 'celery worker', callback=callback) + sigs = sorted(self.t.signal_node.call_args_list) self.assertEqual(len(sigs), 2) - self.assertEqual(sigs[0][0], ('foo.e.com', 10, signal.SIGTERM)) - self.assertEqual(sigs[1][0], ('bar.e.com', 11, signal.SIGTERM)) + self.assertIn( + ('foo@e.com', 10, signal.SIGTERM), + [tup[0] for tup in sigs], + ) + self.assertIn( + ('bar@e.com', 11, signal.SIGTERM), + [tup[0] for tup in sigs], + ) self.t.signal_node.return_value = False self.assertTrue(callback.called) - self.t.stop(['foo', 'bar', 'baz'], 'celeryd', callback=None) + self.t.stop(['foo', 'bar', 'baz'], 'celery worker', callback=None) def on_node_alive(pid): if node_alive.call_count > 4: @@ -349,7 +372,7 @@ class test_MultiTool(Case): return False self.t.signal_node.return_value = True self.t.node_alive.side_effect = on_node_alive - self.t.stop(['foo', 'bar', 'baz'], 'celeryd', retry=True) + self.t.stop(['foo', 'bar', 'baz'], 'celery worker', retry=True) @patch('os.kill') def test_node_alive(self, kill): @@ -387,35 +410,35 @@ class test_MultiTool(Case): def test_start(self): self.t.waitexec = Mock() self.t.waitexec.return_value = 0 - self.assertFalse(self.t.start(['foo', 'bar', 'baz'], 'celeryd')) + self.assertFalse(self.t.start(['foo', 'bar', 'baz'], 'celery worker')) self.t.waitexec.return_value = 1 - self.assertFalse(self.t.start(['foo', 'bar', 'baz'], 'celeryd')) + self.assertFalse(self.t.start(['foo', 'bar', 'baz'], 'celery worker')) def test_show(self): - self.t.show(['foo', 'bar', 'baz'], 'celeryd') + self.t.show(['foo', 'bar', 'baz'], 'celery worker') self.assertTrue(self.fh.getvalue()) @patch('socket.gethostname') def test_get(self, gethostname): gethostname.return_value = 'e.com' - self.t.get(['xuzzy.e.com', 'foo', 'bar', 'baz'], 'celeryd') + self.t.get(['xuzzy@e.com', 'foo', 'bar', 'baz'], 'celery worker') self.assertFalse(self.fh.getvalue()) - self.t.get(['foo.e.com', 'foo', 'bar', 'baz'], 'celeryd') + self.t.get(['foo@e.com', 'foo', 'bar', 'baz'], 'celery worker') self.assertTrue(self.fh.getvalue()) @patch('socket.gethostname') def test_names(self, gethostname): gethostname.return_value = 'e.com' - self.t.names(['foo', 'bar', 'baz'], 'celeryd') - self.assertIn('foo.e.com\nbar.e.com\nbaz.e.com', self.fh.getvalue()) + self.t.names(['foo', 'bar', 'baz'], 'celery worker') + self.assertIn('foo@e.com\nbar@e.com\nbaz@e.com', self.fh.getvalue()) def test_execute_from_commandline(self): start = self.t.commands['start'] = Mock() self.t.error = Mock() self.t.execute_from_commandline(['multi', 'start', 'foo', 'bar']) self.assertFalse(self.t.error.called) - start.assert_called_with(['foo', 'bar'], 'celeryd') + start.assert_called_with(['foo', 'bar'], 'celery worker') self.t.error = Mock() self.t.execute_from_commandline(['multi', 'frob', 'foo', 'bar']) @@ -440,10 +463,10 @@ class test_MultiTool(Case): def test_stopwait(self): self.t._stop_nodes = Mock() - self.t.stopwait(['foo', 'bar', 'baz'], 'celeryd') + self.t.stopwait(['foo', 'bar', 'baz'], 'celery worker') self.assertEqual(self.t._stop_nodes.call_args[1]['retry'], 2) - @patch('celery.bin.celeryd_multi.MultiTool') + @patch('celery.bin.multi.MultiTool') def test_main(self, MultiTool): m = MultiTool.return_value = 
Mock() with self.assertRaises(SystemExit): diff --git a/awx/lib/site-packages/celery/tests/bin/test_celeryd.py b/awx/lib/site-packages/celery/tests/bin/test_worker.py similarity index 71% rename from awx/lib/site-packages/celery/tests/bin/test_celeryd.py rename to awx/lib/site-packages/celery/tests/bin/test_worker.py index 9ed3f242ab..7d4dba3be4 100644 --- a/awx/lib/site-packages/celery/tests/bin/test_celeryd.py +++ b/awx/lib/site-packages/celery/tests/bin/test_worker.py @@ -1,5 +1,4 @@ from __future__ import absolute_import -from __future__ import with_statement import logging import os @@ -7,26 +6,24 @@ import sys from functools import wraps -from mock import Mock, patch -from nose import SkipTest - from billiard import current_process from kombu import Exchange, Queue -from celery import Celery from celery import platforms from celery import signals -from celery import current_app +from celery.app import trace from celery.apps import worker as cd -from celery.bin.celeryd import WorkerCommand, main as celeryd_main +from celery.bin.worker import worker, main as worker_main from celery.exceptions import ImproperlyConfigured, SystemTerminate -from celery.task import trace from celery.utils.log import ensure_process_aware_logger from celery.worker import state -from celery.tests.utils import ( +from celery.tests.case import ( AppCase, + Mock, + SkipTest, WhateverIO, + patch, skip_if_pypy, skip_if_jython, ) @@ -60,83 +57,82 @@ def disable_stdouts(fun): return disable -class _WorkController(object): - - def __init__(self, *args, **kwargs): - pass - - def start(self): - pass - - class Worker(cd.Worker): - WorkController = _WorkController + redirect_stdouts = False - def __init__(self, *args, **kwargs): - super(Worker, self).__init__(*args, **kwargs) - self.redirect_stdouts = False + def start(self, *args, **kwargs): + self.on_start() class test_Worker(WorkerAppCase): - Worker = Worker - def teardown(self): - self.app.conf.CELERY_INCLUDE = () - @disable_stdouts def test_queues_string(self): - celery = Celery(set_as_current=False) - worker = celery.Worker(queues='foo,bar,baz') - worker.init_queues() - self.assertEqual(worker.use_queues, ['foo', 'bar', 'baz']) - self.assertTrue('foo' in celery.amqp.queues) + w = self.app.Worker() + w.setup_queues('foo,bar,baz') + self.assertTrue('foo' in self.app.amqp.queues) @disable_stdouts def test_cpu_count(self): - celery = Celery(set_as_current=False) - with patch('celery.apps.worker.cpu_count') as cpu_count: + with patch('celery.worker.cpu_count') as cpu_count: cpu_count.side_effect = NotImplementedError() - worker = celery.Worker(concurrency=None) - self.assertEqual(worker.concurrency, 2) - worker = celery.Worker(concurrency=5) - self.assertEqual(worker.concurrency, 5) + w = self.app.Worker(concurrency=None) + self.assertEqual(w.concurrency, 2) + w = self.app.Worker(concurrency=5) + self.assertEqual(w.concurrency, 5) @disable_stdouts def test_windows_B_option(self): - celery = Celery(set_as_current=False) - celery.IS_WINDOWS = True + self.app.IS_WINDOWS = True with self.assertRaises(SystemExit): - WorkerCommand(app=celery).run(beat=True) + worker(app=self.app).run(beat=True) def test_setup_concurrency_very_early(self): - x = WorkerCommand() + x = worker() x.run = Mock() with self.assertRaises(ImportError): - x.execute_from_commandline(['celeryd', '-P', 'xyzybox']) + x.execute_from_commandline(['worker', '-P', 'xyzybox']) + + def test_run_from_argv_basic(self): + x = worker(app=self.app) + x.run = Mock() + x.maybe_detach = Mock() + + def run(*args, 
**kwargs): + pass + x.run = run + x.run_from_argv('celery', []) + self.assertTrue(x.maybe_detach.called) + + def test_maybe_detach(self): + x = worker(app=self.app) + with patch('celery.bin.worker.detached_celeryd') as detached: + x.maybe_detach([]) + self.assertFalse(detached.called) + with self.assertRaises(SystemExit): + x.maybe_detach(['--detach']) + self.assertTrue(detached.called) @disable_stdouts def test_invalid_loglevel_gives_error(self): - x = WorkerCommand(app=Celery(set_as_current=False)) + x = worker(app=self.app) with self.assertRaises(SystemExit): x.run(loglevel='GRIM_REAPER') def test_no_loglevel(self): - app = Celery(set_as_current=False) - app.Worker = Mock() - WorkerCommand(app=app).run(loglevel=None) + self.app.Worker = Mock() + worker(app=self.app).run(loglevel=None) def test_tasklist(self): - celery = Celery(set_as_current=False) - worker = celery.Worker() + worker = self.app.Worker() self.assertTrue(worker.app.tasks) self.assertTrue(worker.app.finalized) self.assertTrue(worker.tasklist(include_builtins=True)) worker.tasklist(include_builtins=False) def test_extra_info(self): - celery = Celery(set_as_current=False) - worker = celery.Worker() + worker = self.app.Worker() worker.loglevel = logging.WARNING self.assertFalse(worker.extra_info()) worker.loglevel = logging.INFO @@ -144,9 +140,10 @@ class test_Worker(WorkerAppCase): @disable_stdouts def test_loglevel_string(self): - worker = self.Worker(loglevel='INFO') + worker = self.Worker(app=self.app, loglevel='INFO') self.assertEqual(worker.loglevel, logging.INFO) + @disable_stdouts def test_run_worker(self): handlers = {} @@ -158,16 +155,16 @@ class test_Worker(WorkerAppCase): p = platforms.signals platforms.signals = Signals() try: - w = self.Worker() + w = self.Worker(app=self.app) w._isatty = False - w.run_worker() + w.on_start() for sig in 'SIGINT', 'SIGHUP', 'SIGTERM': self.assertIn(sig, handlers) handlers.clear() - w = self.Worker() + w = self.Worker(app=self.app) w._isatty = True - w.run_worker() + w.on_start() for sig in 'SIGINT', 'SIGTERM': self.assertIn(sig, handlers) self.assertNotIn('SIGHUP', handlers) @@ -176,8 +173,8 @@ class test_Worker(WorkerAppCase): @disable_stdouts def test_startup_info(self): - worker = self.Worker() - worker.run() + worker = self.Worker(app=self.app) + worker.on_start() self.assertTrue(worker.startup_info()) worker.loglevel = logging.DEBUG self.assertTrue(worker.startup_info()) @@ -186,29 +183,21 @@ class test_Worker(WorkerAppCase): worker.autoscale = 13, 10 self.assertTrue(worker.startup_info()) - worker = self.Worker(queues='foo,bar,baz,xuzzy,do,re,mi') - app = worker.app - prev, app.loader = app.loader, Mock() - try: - app.loader.__module__ = 'acme.baked_beans' - self.assertTrue(worker.startup_info()) - finally: - app.loader = prev + prev_loader = self.app.loader + worker = self.Worker(app=self.app, queues='foo,bar,baz,xuzzy,do,re,mi') + self.app.loader = Mock() + self.app.loader.__module__ = 'acme.baked_beans' + self.assertTrue(worker.startup_info()) - prev, app.loader = app.loader, Mock() - try: - app.loader.__module__ = 'celery.loaders.foo' - self.assertTrue(worker.startup_info()) - finally: - app.loader = prev + self.app.loader = Mock() + self.app.loader.__module__ = 'celery.loaders.foo' + self.assertTrue(worker.startup_info()) from celery.loaders.app import AppLoader - prev, app.loader = app.loader, AppLoader() - try: - self.assertTrue(worker.startup_info()) - finally: - app.loader = prev + self.app.loader = AppLoader(app=self.app) + self.assertTrue(worker.startup_info()) + 
self.app.loader = prev_loader worker.send_events = True self.assertTrue(worker.startup_info()) @@ -219,104 +208,125 @@ class test_Worker(WorkerAppCase): @disable_stdouts def test_run(self): - self.Worker().run() - self.Worker(purge=True).run() - worker = self.Worker() - worker.run() - - prev, cd.IGNORE_ERRORS = cd.IGNORE_ERRORS, (KeyError, ) - try: - worker.run_worker = Mock() - worker.run_worker.side_effect = KeyError() - worker.run() - finally: - cd.IGNORE_ERRORS = prev + self.Worker(app=self.app).on_start() + self.Worker(app=self.app, purge=True).on_start() + worker = self.Worker(app=self.app) + worker.on_start() @disable_stdouts def test_purge_messages(self): - self.Worker().purge_messages() + self.Worker(app=self.app).purge_messages() @disable_stdouts def test_init_queues(self): - app = current_app + app = self.app c = app.conf - p, app.amqp.queues = app.amqp.queues, app.amqp.Queues({ + app.amqp.queues = app.amqp.Queues({ 'celery': {'exchange': 'celery', 'routing_key': 'celery'}, 'video': {'exchange': 'video', - 'routing_key': 'video'}}) - try: - worker = self.Worker(queues=['video']) - worker.init_queues() - self.assertIn('video', app.amqp.queues) - self.assertIn('video', app.amqp.queues.consume_from) - self.assertIn('celery', app.amqp.queues) - self.assertNotIn('celery', app.amqp.queues.consume_from) + 'routing_key': 'video'}, + }) + worker = self.Worker(app=self.app) + worker.setup_queues(['video']) + self.assertIn('video', app.amqp.queues) + self.assertIn('video', app.amqp.queues.consume_from) + self.assertIn('celery', app.amqp.queues) + self.assertNotIn('celery', app.amqp.queues.consume_from) - c.CELERY_CREATE_MISSING_QUEUES = False - del(app.amqp.queues) - with self.assertRaises(ImproperlyConfigured): - self.Worker(queues=['image']).init_queues() - del(app.amqp.queues) - c.CELERY_CREATE_MISSING_QUEUES = True - worker = self.Worker(queues=['image']) - worker.init_queues() - self.assertIn('image', app.amqp.queues.consume_from) - self.assertEqual(Queue('image', Exchange('image'), - routing_key='image'), app.amqp.queues['image']) - finally: - app.amqp.queues = p + c.CELERY_CREATE_MISSING_QUEUES = False + del(app.amqp.queues) + with self.assertRaises(ImproperlyConfigured): + self.Worker(app=self.app).setup_queues(['image']) + del(app.amqp.queues) + c.CELERY_CREATE_MISSING_QUEUES = True + worker = self.Worker(app=self.app) + worker.setup_queues(['image']) + self.assertIn('image', app.amqp.queues.consume_from) + self.assertEqual( + Queue('image', Exchange('image'), routing_key='image'), + app.amqp.queues['image'], + ) @disable_stdouts def test_autoscale_argument(self): - worker1 = self.Worker(autoscale='10,3') + worker1 = self.Worker(app=self.app, autoscale='10,3') self.assertListEqual(worker1.autoscale, [10, 3]) - worker2 = self.Worker(autoscale='10') + worker2 = self.Worker(app=self.app, autoscale='10') self.assertListEqual(worker2.autoscale, [10, 0]) + self.assert_no_logging_side_effect() def test_include_argument(self): - worker1 = self.Worker(include='some.module') - self.assertListEqual(worker1.include, ['some.module']) - worker2 = self.Worker(include='some.module,another.package') - self.assertListEqual( - worker2.include, - ['some.module', 'another.package'], - ) - self.Worker(include=['os', 'sys']) + worker1 = self.Worker(app=self.app, include='os') + self.assertListEqual(worker1.include, ['os']) + worker2 = self.Worker(app=self.app, + include='os,sys') + self.assertListEqual(worker2.include, ['os', 'sys']) + self.Worker(app=self.app, include=['os', 'sys']) @disable_stdouts 
def test_unknown_loglevel(self): with self.assertRaises(SystemExit): - WorkerCommand(app=self.app).run(loglevel='ALIEN') - worker1 = self.Worker(loglevel=0xFFFF) + worker(app=self.app).run(loglevel='ALIEN') + worker1 = self.Worker(app=self.app, loglevel=0xFFFF) self.assertEqual(worker1.loglevel, 0xFFFF) + @disable_stdouts def test_warns_if_running_as_privileged_user(self): - app = current_app + app = self.app if app.IS_WINDOWS: raise SkipTest('Not applicable on Windows') - def getuid(): - return 0 - - prev, os.getuid = os.getuid, getuid - try: + with patch('os.getuid') as getuid: + getuid.return_value = 0 + self.app.conf.CELERY_ACCEPT_CONTENT = ['pickle'] + with self.assertRaises(RuntimeError): + worker = self.Worker(app=self.app) + worker.on_start() + cd.C_FORCE_ROOT = True + try: + with self.assertWarnsRegex( + RuntimeWarning, + r'absolutely not recommended'): + worker = self.Worker(app=self.app) + worker.on_start() + finally: + cd.C_FORCE_ROOT = False + self.app.conf.CELERY_ACCEPT_CONTENT = ['json'] with self.assertWarnsRegex( RuntimeWarning, - r'superuser privileges is discouraged'): - worker = self.Worker() - worker.run() - finally: - os.getuid = prev + r'absolutely not recommended'): + worker = self.Worker(app=self.app) + worker.on_start() @disable_stdouts def test_redirect_stdouts(self): - worker = self.Worker() - worker.redirect_stdouts = False - worker.setup_logging() + self.Worker(app=self.app, redirect_stdouts=False) with self.assertRaises(AttributeError): sys.stdout.logger + @disable_stdouts + def test_on_start_custom_logging(self): + self.app.log.redirect_stdouts = Mock() + worker = self.Worker(app=self.app, redirect_stdouts=True) + worker._custom_logging = True + worker.on_start() + self.assertFalse(self.app.log.redirect_stdouts.called) + + def test_setup_logging_no_color(self): + worker = self.Worker( + app=self.app, redirect_stdouts=False, no_color=True, + ) + prev, self.app.log.setup = self.app.log.setup, Mock() + worker.setup_logging() + self.assertFalse(self.app.log.setup.call_args[1]['colorize']) + + @disable_stdouts + def test_startup_info_pool_is_str(self): + worker = self.Worker(app=self.app, redirect_stdouts=False) + worker.pool_cls = 'foo' + worker.startup_info() + def test_redirect_stdouts_already_handled(self): logging_setup = [False] @@ -325,8 +335,8 @@ class test_Worker(WorkerAppCase): logging_setup[0] = True try: - worker = self.Worker() - worker.app.log.__class__._setup = False + worker = self.Worker(app=self.app, redirect_stdouts=False) + worker.app.log.already_setup = False worker.setup_logging() self.assertTrue(logging_setup[0]) with self.assertRaises(AttributeError): @@ -343,7 +353,7 @@ class test_Worker(WorkerAppCase): def osx_proxy_detection_workaround(self): self.proxy_workaround_installed = True - worker = OSXWorker(redirect_stdouts=False) + worker = OSXWorker(app=self.app, redirect_stdouts=False) def install_HUP_nosupport(controller): controller.hup_not_supported_installed = True @@ -376,7 +386,7 @@ class test_Worker(WorkerAppCase): prev = cd.install_worker_restart_handler cd.install_worker_restart_handler = install_worker_restart_handler try: - worker = self.Worker() + worker = self.Worker(app=self.app) worker.app.IS_OSX = False worker.install_platform_tweaks(Controller()) self.assertTrue(restart_worker_handler_installed[0]) @@ -391,7 +401,7 @@ class test_Worker(WorkerAppCase): def on_worker_ready(**kwargs): worker_ready_sent[0] = True - self.Worker().on_consumer_ready(object()) + self.Worker(app=self.app).on_consumer_ready(object())
         self.assertTrue(worker_ready_sent[0])

@@ -406,7 +416,7 @@ class test_funs(WorkerAppCase):
             __import__('setproctitle')
         except ImportError:
             raise SkipTest('setproctitle not installed')
-        worker = Worker(hostname='xyzza')
+        worker = Worker(app=self.app, hostname='xyzza')
         prev1, sys.argv = sys.argv, ['Arg0']
         try:
             st = worker.set_process_status('Running')
@@ -427,17 +437,17 @@ class test_funs(WorkerAppCase):

     @disable_stdouts
     def test_parse_options(self):
-        cmd = WorkerCommand()
-        cmd.app = current_app
-        opts, args = cmd.parse_options('celeryd', ['--concurrency=512'])
+        cmd = worker()
+        cmd.app = self.app
+        opts, args = cmd.parse_options('worker', ['--concurrency=512'])
         self.assertEqual(opts.concurrency, 512)

     @disable_stdouts
     def test_main(self):
         p, cd.Worker = cd.Worker, Worker
-        s, sys.argv = sys.argv, ['celeryd', '--discard']
+        s, sys.argv = sys.argv, ['worker', '--discard']
         try:
-            celeryd_main()
+            worker_main(app=self.app)
         finally:
             cd.Worker = p
             sys.argv = s
@@ -603,12 +613,9 @@ class test_signal_handlers(WorkerAppCase):
     @skip_if_pypy
     @skip_if_jython
     def test_worker_cry_handler(self, stderr):
-        if sys.version_info > (2, 5):
-            handlers = self.psig(cd.install_cry_handler)
-            self.assertIsNone(handlers['SIGUSR1']('SIGUSR1', object()))
-            self.assertTrue(stderr.write.called)
-        else:
-            raise SkipTest('Needs Python 2.5 or later')
+        handlers = self.psig(cd.install_cry_handler)
+        self.assertIsNone(handlers['SIGUSR1']('SIGUSR1', object()))
+        self.assertTrue(stderr.write.called)

     @disable_stdouts
     def test_worker_term_handler_only_stop_MainProcess(self):
@@ -637,10 +644,8 @@ class test_signal_handlers(WorkerAppCase):

     @disable_stdouts
     @patch('atexit.register')
-    @patch('os.fork')
     @patch('os.close')
-    def test_worker_restart_handler(self, _close, fork, register):
-        fork.return_value = 0
+    def test_worker_restart_handler(self, _close, register):
         if getattr(os, 'execv', None) is None:
             raise SkipTest('platform does not have execv')
         argv = []
@@ -658,10 +663,6 @@ class test_signal_handlers(WorkerAppCase):
             callback = register.call_args[0][0]
             callback()
             self.assertTrue(argv)
-            argv[:] = []
-            fork.return_value = 1
-            callback()
-            self.assertFalse(argv)
         finally:
             os.execv = execv
             state.should_stop = False
diff --git a/awx/lib/site-packages/celery/tests/utils.py b/awx/lib/site-packages/celery/tests/case.py
similarity index 60%
rename from awx/lib/site-packages/celery/tests/utils.py
rename to awx/lib/site-packages/celery/tests/case.py
index 37fd8c575d..c453721f10 100644
--- a/awx/lib/site-packages/celery/tests/utils.py
+++ b/awx/lib/site-packages/celery/tests/case.py
@@ -1,5 +1,4 @@
 from __future__ import absolute_import
-from __future__ import with_statement

 try:
     import unittest  # noqa
@@ -10,32 +9,125 @@ except AttributeError:
     from unittest2.util import safe_repr, unorderable_list_difference  # noqa

 import importlib
+import inspect
 import logging
 import os
 import platform
 import re
 import sys
+import threading
 import time
 import warnings

-try:
-    import __builtin__ as builtins
-except ImportError:  # py3k
-    import builtins  # noqa
 from contextlib import contextmanager
+from copy import deepcopy
+from datetime import datetime, timedelta
 from functools import partial, wraps
 from types import ModuleType

-import mock
+try:
+    from unittest import mock
+except ImportError:
+    import mock  # noqa
 from nose import SkipTest
+from kombu import Queue
 from kombu.log import NullHandler
-from kombu.utils import nested
+from kombu.utils import nested, symbol_by_name

-from celery.app import app_or_default
-from celery.utils.compat import
WhateverIO +from celery import Celery +from celery.app import current_app +from celery.backends.cache import CacheBackend, DummyClient +from celery.five import ( + WhateverIO, builtins, items, reraise, + string_t, values, open_fqdn, +) from celery.utils.functional import noop +from celery.utils.imports import qualname -from .compat import catch_warnings +__all__ = [ + 'Case', 'AppCase', 'Mock', 'MagicMock', + 'patch', 'call', 'sentinel', 'skip_unless_module', + 'wrap_logger', 'with_environ', 'sleepdeprived', + 'skip_if_environ', 'todo', 'skip', 'skip_if', + 'skip_unless', 'mask_modules', 'override_stdouts', 'mock_module', + 'replace_module_value', 'sys_platform', 'reset_modules', + 'patch_modules', 'mock_context', 'mock_open', 'patch_many', + 'assert_signal_called', 'skip_if_pypy', + 'skip_if_jython', 'body_from_sig', 'restore_logging', +] +patch = mock.patch +call = mock.call +sentinel = mock.sentinel +MagicMock = mock.MagicMock + +CASE_REDEFINES_SETUP = """\ +{name} (subclass of AppCase) redefines private "setUp", should be: "setup"\ +""" +CASE_REDEFINES_TEARDOWN = """\ +{name} (subclass of AppCase) redefines private "tearDown", \ +should be: "teardown"\ +""" +CASE_LOG_REDIRECT_EFFECT = """\ +Test {0} did not disable LoggingProxy for {1}\ +""" +CASE_LOG_LEVEL_EFFECT = """\ +Test {0} Modified the level of the root logger\ +""" +CASE_LOG_HANDLER_EFFECT = """\ +Test {0} Modified handlers for the root logger\ +""" + +CELERY_TEST_CONFIG = { + #: Don't want log output when running suite. + 'CELERYD_HIJACK_ROOT_LOGGER': False, + 'CELERY_SEND_TASK_ERROR_EMAILS': False, + 'CELERY_DEFAULT_QUEUE': 'testcelery', + 'CELERY_DEFAULT_EXCHANGE': 'testcelery', + 'CELERY_DEFAULT_ROUTING_KEY': 'testcelery', + 'CELERY_QUEUES': ( + Queue('testcelery', routing_key='testcelery'), + ), + 'CELERY_ENABLE_UTC': True, + 'CELERY_TIMEZONE': 'UTC', + 'CELERYD_LOG_COLOR': False, + + # Mongo results tests (only executed if installed and running) + 'CELERY_MONGODB_BACKEND_SETTINGS': { + 'host': os.environ.get('MONGO_HOST') or 'localhost', + 'port': os.environ.get('MONGO_PORT') or 27017, + 'database': os.environ.get('MONGO_DB') or 'celery_unittests', + 'taskmeta_collection': (os.environ.get('MONGO_TASKMETA_COLLECTION') + or 'taskmeta_collection'), + 'user': os.environ.get('MONGO_USER'), + 'password': os.environ.get('MONGO_PASSWORD'), + } +} + + +class Trap(object): + + def __getattr__(self, name): + raise RuntimeError('Test depends on current_app') + + +class UnitLogging(symbol_by_name(Celery.log_cls)): + + def __init__(self, *args, **kwargs): + super(UnitLogging, self).__init__(*args, **kwargs) + self.already_setup = True + + +def UnitApp(name=None, broker=None, backend=None, + set_as_current=False, log=UnitLogging, **kwargs): + + app = Celery(name or 'celery.tests', + broker=broker or 'memory://', + backend=backend or 'cache+memory://', + set_as_current=set_as_current, + log=log, + **kwargs) + app.add_defaults(deepcopy(CELERY_TEST_CONFIG)) + return app class Mock(mock.Mock): @@ -43,7 +135,7 @@ class Mock(mock.Mock): def __init__(self, *args, **kwargs): attrs = kwargs.pop('attrs', None) or {} super(Mock, self).__init__(*args, **kwargs) - for attr_name, attr_value in attrs.items(): + for attr_name, attr_value in items(attrs): setattr(self, attr_name, attr_value) @@ -73,7 +165,7 @@ class _AssertRaisesBaseContext(object): self.expected = expected self.failureException = test_case.failureException self.obj_name = None - if isinstance(expected_regex, basestring): + if isinstance(expected_regex, string_t): expected_regex = 
re.compile(expected_regex) self.expected_regex = expected_regex @@ -85,10 +177,10 @@ class _AssertWarnsContext(_AssertRaisesBaseContext): # The __warningregistry__'s need to be in a pristine state for tests # to work properly. warnings.resetwarnings() - for v in sys.modules.values(): + for v in list(values(sys.modules)): if getattr(v, '__warningregistry__', None): v.__warningregistry__ = {} - self.warnings_manager = catch_warnings(record=True) + self.warnings_manager = warnings.catch_warnings(record=True) self.warnings = self.warnings_manager.__enter__() warnings.simplefilter('always', self.expected) return self @@ -141,7 +233,7 @@ class Case(unittest.TestCase): def assertDictContainsSubset(self, expected, actual, msg=None): missing, mismatched = [], [] - for key, value in expected.iteritems(): + for key, value in items(expected): if key not in actual: missing.append(key) elif value != actual[key]: @@ -181,32 +273,120 @@ class Case(unittest.TestCase): errors = [] if missing: errors.append( - 'Expected, but missing:\n %s' % (safe_repr(missing), ), + 'Expected, but missing:\n %s' % (safe_repr(missing), ) ) if unexpected: errors.append( - 'Unexpected, but present:\n %s' % (safe_repr(unexpected), ), + 'Unexpected, but present:\n %s' % (safe_repr(unexpected), ) ) if errors: standardMsg = '\n'.join(errors) self.fail(self._formatMessage(msg, standardMsg)) +def depends_on_current_app(fun): + if inspect.isclass(fun): + fun.contained = False + else: + @wraps(fun) + def __inner(self, *args, **kwargs): + self.app.set_current() + return fun(self, *args, **kwargs) + return __inner + + class AppCase(Case): + contained = True + + def __init__(self, *args, **kwargs): + super(AppCase, self).__init__(*args, **kwargs) + if self.__class__.__dict__.get('setUp'): + raise RuntimeError( + CASE_REDEFINES_SETUP.format(name=qualname(self)), + ) + if self.__class__.__dict__.get('tearDown'): + raise RuntimeError( + CASE_REDEFINES_TEARDOWN.format(name=qualname(self)), + ) + + def Celery(self, *args, **kwargs): + return UnitApp(*args, **kwargs) def setUp(self): - from celery.app import current_app - from celery.backends.cache import CacheBackend, DummyClient - app = self.app = self._current_app = current_app() - if isinstance(app.backend, CacheBackend): - if isinstance(app.backend.client, DummyClient): - app.backend.client.cache.clear() - app.backend._cache.clear() - self.setup() + self._threads_at_setup = list(threading.enumerate()) + from celery import _state + from celery import result + result.task_join_will_block = \ + _state.task_join_will_block = lambda: False + self._current_app = current_app() + self._default_app = _state.default_app + trap = Trap() + _state.set_default_app(trap) + _state._tls.current_app = trap + + self.app = self.Celery(set_as_current=False) + if not self.contained: + self.app.set_current() + root = logging.getLogger() + self.__rootlevel = root.level + self.__roothandlers = root.handlers + _state._set_task_join_will_block(False) + try: + self.setup() + except: + self._teardown_app() + raise + + def _teardown_app(self): + from celery.utils.log import LoggingProxy + assert sys.stdout + assert sys.stderr + assert sys.__stdout__ + assert sys.__stderr__ + this = self._get_test_name() + if isinstance(sys.stdout, LoggingProxy) or \ + isinstance(sys.__stdout__, LoggingProxy): + raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout')) + if isinstance(sys.stderr, LoggingProxy) or \ + isinstance(sys.__stderr__, LoggingProxy): + raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 
'stderr')) + backend = self.app.__dict__.get('backend') + if backend is not None: + if isinstance(backend, CacheBackend): + if isinstance(backend.client, DummyClient): + backend.client.cache.clear() + backend._cache.clear() + from celery._state import ( + _tls, set_default_app, _set_task_join_will_block, + ) + _set_task_join_will_block(False) + + set_default_app(self._default_app) + _tls.current_app = self._current_app + if self.app is not self._current_app: + self.app.close() + self.app = None + self.assertEqual( + self._threads_at_setup, list(threading.enumerate()), + ) + + def _get_test_name(self): + return '.'.join([self.__class__.__name__, self._testMethodName]) def tearDown(self): - self.teardown() - self._current_app.set_current() + try: + self.teardown() + finally: + self._teardown_app() + self.assert_no_logging_side_effect() + + def assert_no_logging_side_effect(self): + this = self._get_test_name() + root = logging.getLogger() + if root.level != self.__rootlevel: + raise RuntimeError(CASE_LOG_LEVEL_EFFECT.format(this)) + if root.handlers != self.__roothandlers: + raise RuntimeError(CASE_LOG_HANDLER_EFFECT.format(this)) def setup(self): pass @@ -232,31 +412,6 @@ def wrap_logger(logger, loglevel=logging.ERROR): logger.handlers = old_handlers -@contextmanager -def eager_tasks(): - app = app_or_default() - - prev = app.conf.CELERY_ALWAYS_EAGER - app.conf.CELERY_ALWAYS_EAGER = True - try: - yield True - finally: - app.conf.CELERY_ALWAYS_EAGER = prev - - -def with_eager_tasks(fun): - - @wraps(fun) - def _inner(*args, **kwargs): - app = app_or_default() - prev = app.conf.CELERY_ALWAYS_EAGER - app.conf.CELERY_ALWAYS_EAGER = True - try: - return fun(*args, **kwargs) - finally: - app.conf.CELERY_ALWAYS_EAGER = prev - - def with_environ(env_name, env_value): def _envpatched(fun): @@ -268,8 +423,7 @@ def with_environ(env_name, env_value): try: return fun(*args, **kwargs) finally: - if prev_val is not None: - os.environ[env_name] = prev_val + os.environ[env_name] = prev_val or '' return _patch_environ return _envpatched @@ -309,10 +463,6 @@ def skip_if_environ(env_var_name): return _wrap_test -def skip_if_quick(fun): - return skip_if_environ('QUICKTEST')(fun) - - def _skip_test(reason, sign): def _wrap_test(fun): @@ -357,14 +507,14 @@ def mask_modules(*modnames): For example: - >>> with missing_modules('sys'): + >>> with mask_modules('sys'): ... try: ... import sys ... except ImportError: ... 
print 'sys not found' sys not found - >>> import sys + >>> import sys # noqa >>> sys.version (2, 5, 2, 'final', 0) @@ -400,7 +550,7 @@ def override_stdouts(): sys.stderr = sys.__stderr__ = prev_err -def patch(module, name, mocked): +def _old_patch(module, name, mocked): module = importlib.import_module(module) def _patch(fun): @@ -468,11 +618,12 @@ def reset_modules(*modules): def patch_modules(*modules): prev = {} for mod in modules: - prev[mod], sys.modules[mod] = sys.modules[mod], ModuleType(mod) + prev[mod] = sys.modules.get(mod) + sys.modules[mod] = ModuleType(mod) try: yield finally: - for name, mod in prev.iteritems(): + for name, mod in items(prev): if mod is None: sys.modules.pop(name, None) else: @@ -518,7 +669,7 @@ def mock_context(mock, typ=Mock): def on_exit(*x): if x[0]: - raise x[0], x[1], x[2] + reraise(x[0], x[1], x[2]) context.__exit__.side_effect = on_exit context.__enter__.return_value = context try: @@ -529,7 +680,7 @@ def mock_context(mock, typ=Mock): @contextmanager def mock_open(typ=WhateverIO, side_effect=None): - with mock.patch('__builtin__.open') as open_: + with patch(open_fqdn) as open_: with mock_context(open_) as context: if side_effect is not None: context.__enter__.side_effect = side_effect @@ -539,27 +690,7 @@ def mock_open(typ=WhateverIO, side_effect=None): def patch_many(*targets): - return nested(*[mock.patch(target) for target in targets]) - - -@contextmanager -def patch_settings(app=None, **config): - if app is None: - from celery import current_app - app = current_app - prev = {} - for key, value in config.iteritems(): - try: - prev[key] = getattr(app.conf, key) - except AttributeError: - pass - setattr(app.conf, key, value) - - try: - yield app.conf - finally: - for key, value in prev.iteritems(): - setattr(app.conf, key, value) + return nested(*[patch(target) for target in targets]) @contextmanager @@ -592,3 +723,47 @@ def skip_if_jython(fun): raise SkipTest('does not work on Jython') return fun(*args, **kwargs) return _inner + + +def body_from_sig(app, sig, utc=True): + sig.freeze() + callbacks = sig.options.pop('link', None) + errbacks = sig.options.pop('link_error', None) + countdown = sig.options.pop('countdown', None) + if countdown: + eta = app.now() + timedelta(seconds=countdown) + else: + eta = sig.options.pop('eta', None) + if eta and isinstance(eta, datetime): + eta = eta.isoformat() + expires = sig.options.pop('expires', None) + if expires and isinstance(expires, int): + expires = app.now() + timedelta(seconds=expires) + if expires and isinstance(expires, datetime): + expires = expires.isoformat() + return { + 'task': sig.task, + 'id': sig.id, + 'args': sig.args, + 'kwargs': sig.kwargs, + 'callbacks': [dict(s) for s in callbacks] if callbacks else None, + 'errbacks': [dict(s) for s in errbacks] if errbacks else None, + 'eta': eta, + 'utc': utc, + 'expires': expires, + } + + +@contextmanager +def restore_logging(): + outs = sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ + root = logging.getLogger() + level = root.level + handlers = root.handlers + + try: + yield + finally: + sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs + root.level = level + root.handlers[:] = handlers diff --git a/awx/lib/site-packages/celery/tests/compat.py b/awx/lib/site-packages/celery/tests/compat.py deleted file mode 100644 index 30eb853b0d..0000000000 --- a/awx/lib/site-packages/celery/tests/compat.py +++ /dev/null @@ -1,85 +0,0 @@ -from __future__ import absolute_import - -import sys - - -class WarningMessage(object): - - """Holds the 
result of a single showwarning() call.""" - - _WARNING_DETAILS = ('message', 'category', 'filename', 'lineno', 'file', - 'line') - - def __init__(self, message, category, filename, lineno, file=None, - line=None): - local_values = locals() - for attr in self._WARNING_DETAILS: - setattr(self, attr, local_values[attr]) - - self._category_name = category and category.__name__ or None - - def __str__(self): - return ('{message : %r, category : %r, filename : %r, lineno : %s, ' - 'line : %r}' % (self.message, self._category_name, - self.filename, self.lineno, self.line)) - - -class catch_warnings(object): - - """A context manager that copies and restores the warnings filter upon - exiting the context. - - The 'record' argument specifies whether warnings should be captured by a - custom implementation of warnings.showwarning() and be appended to a list - returned by the context manager. Otherwise None is returned by the context - manager. The objects appended to the list are arguments whose attributes - mirror the arguments to showwarning(). - - The 'module' argument is to specify an alternative module to the module - named 'warnings' and imported under that name. This argument is only - useful when testing the warnings module itself. - - """ - - def __init__(self, record=False, module=None): - """Specify whether to record warnings and if an alternative module - should be used other than sys.modules['warnings']. - - For compatibility with Python 3.0, please consider all arguments to be - keyword-only. - - """ - self._record = record - self._module = module is None and sys.modules['warnings'] or module - self._entered = False - - def __repr__(self): - args = [] - if self._record: - args.append('record=True') - if self._module is not sys.modules['warnings']: - args.append('module=%r' % self._module) - name = type(self).__name__ - return '%s(%s)' % (name, ', '.join(args)) - - def __enter__(self): - if self._entered: - raise RuntimeError('Cannot enter %r twice' % self) - self._entered = True - self._filters = self._module.filters - self._module.filters = self._filters[:] - self._showwarning = self._module.showwarning - if self._record: - log = [] - - def showwarning(*args, **kwargs): - log.append(WarningMessage(*args, **kwargs)) - - self._module.showwarning = showwarning - return log - - def __exit__(self, *exc_info): - if not self._entered: - raise RuntimeError('Cannot exit %r without entering first' % self) - self._module.filters = self._filters - self._module.showwarning = self._showwarning diff --git a/awx/lib/site-packages/celery/tests/compat_modules/test_compat.py b/awx/lib/site-packages/celery/tests/compat_modules/test_compat.py new file mode 100644 index 0000000000..d285188e09 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/compat_modules/test_compat.py @@ -0,0 +1,82 @@ +from __future__ import absolute_import + +from datetime import timedelta + +import sys +sys.modules.pop('celery.task', None) + +from celery.schedules import schedule +from celery.task import ( + periodic_task, + PeriodicTask +) +from celery.utils.timeutils import timedelta_seconds + +from celery.tests.case import AppCase, depends_on_current_app + + +class test_Task(AppCase): + + def test_base_task_inherits_magic_kwargs_from_app(self): + from celery.task import Task as OldTask + + class timkX(OldTask): + abstract = True + + with self.Celery(set_as_current=False, + accept_magic_kwargs=True) as app: + timkX.bind(app) + # see #918 + self.assertFalse(timkX.accept_magic_kwargs) + + from celery import Task as NewTask + + 
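+            # Same check for the new-style base class: binding to an app
+            # created with accept_magic_kwargs=True must not copy that
+            # flag onto the task class either (see #918).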
class timkY(NewTask): + abstract = True + + timkY.bind(app) + self.assertFalse(timkY.accept_magic_kwargs) + + +@depends_on_current_app +class test_periodic_tasks(AppCase): + + def setup(self): + @periodic_task(app=self.app, shared=False, + run_every=schedule(timedelta(hours=1), app=self.app)) + def my_periodic(): + pass + self.my_periodic = my_periodic + + def now(self): + return self.app.now() + + def test_must_have_run_every(self): + with self.assertRaises(NotImplementedError): + type('Foo', (PeriodicTask, ), {'__module__': __name__}) + + def test_remaining_estimate(self): + s = self.my_periodic.run_every + self.assertIsInstance( + s.remaining_estimate(s.maybe_make_aware(self.now())), + timedelta) + + def test_is_due_not_due(self): + due, remaining = self.my_periodic.run_every.is_due(self.now()) + self.assertFalse(due) + # This assertion may fail if executed in the + # first minute of an hour, thus 59 instead of 60 + self.assertGreater(remaining, 59) + + def test_is_due(self): + p = self.my_periodic + due, remaining = p.run_every.is_due( + self.now() - p.run_every.run_every, + ) + self.assertTrue(due) + self.assertEqual(remaining, + timedelta_seconds(p.run_every.run_every)) + + def test_schedule_repr(self): + p = self.my_periodic + self.assertTrue(repr(p.run_every)) diff --git a/awx/lib/site-packages/celery/tests/utilities/test_compat.py b/awx/lib/site-packages/celery/tests/compat_modules/test_compat_utils.py similarity index 69% rename from awx/lib/site-packages/celery/tests/utilities/test_compat.py rename to awx/lib/site-packages/celery/tests/compat_modules/test_compat_utils.py index 07ec3b5951..b041a0b3e8 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_compat.py +++ b/awx/lib/site-packages/celery/tests/compat_modules/test_compat_utils.py @@ -1,14 +1,15 @@ from __future__ import absolute_import - import celery + from celery.app.task import Task as ModernTask from celery.task.base import Task as CompatTask -from celery.tests.utils import Case +from celery.tests.case import AppCase, depends_on_current_app -class test_MagicModule(Case): +@depends_on_current_app +class test_MagicModule(AppCase): def test_class_property_set_without_type(self): self.assertTrue(ModernTask.__dict__['app'].__get__(CompatTask())) @@ -21,24 +22,14 @@ class test_MagicModule(Case): class X(CompatTask): pass - - app = celery.Celery(set_as_current=False) - ModernTask.__dict__['app'].__set__(X(), app) - self.assertEqual(X.app, app) + ModernTask.__dict__['app'].__set__(X(), self.app) + self.assertIs(X.app, self.app) def test_dir(self): self.assertTrue(dir(celery.messaging)) def test_direct(self): - import sys - prev_celery = sys.modules.pop('celery', None) - prev_task = sys.modules.pop('celery.task', None) - try: - import celery - self.assertTrue(celery.task) - finally: - sys.modules['celery'] = prev_celery - sys.modules['celery.task'] = prev_task + self.assertTrue(celery.task) def test_app_attrs(self): self.assertEqual(celery.task.control.broadcast, diff --git a/awx/lib/site-packages/celery/tests/compat_modules/test_decorators.py b/awx/lib/site-packages/celery/tests/compat_modules/test_decorators.py index b8e3c4ee62..9f5dff9473 100644 --- a/awx/lib/site-packages/celery/tests/compat_modules/test_decorators.py +++ b/awx/lib/site-packages/celery/tests/compat_modules/test_decorators.py @@ -1,20 +1,26 @@ from __future__ import absolute_import -from __future__ import with_statement + +import warnings from celery.task import base -from celery.tests.compat import catch_warnings -from celery.tests.utils import 
Case +from celery.tests.case import AppCase, depends_on_current_app def add(x, y): return x + y -class test_decorators(Case): +@depends_on_current_app +class test_decorators(AppCase): - def setUp(self): - with catch_warnings(record=True): + def test_task_alias(self): + from celery import task + self.assertTrue(task.__file__) + self.assertTrue(task(add)) + + def setup(self): + with warnings.catch_warnings(record=True): from celery import decorators self.decorators = decorators diff --git a/awx/lib/site-packages/celery/tests/tasks/test_http.py b/awx/lib/site-packages/celery/tests/compat_modules/test_http.py similarity index 81% rename from awx/lib/site-packages/celery/tests/tasks/test_http.py rename to awx/lib/site-packages/celery/tests/compat_modules/test_http.py index 3d50dcb961..993142fad0 100644 --- a/awx/lib/site-packages/celery/tests/tasks/test_http.py +++ b/awx/lib/site-packages/celery/tests/compat_modules/test_http.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import with_statement +from __future__ import absolute_import, unicode_literals from contextlib import contextmanager from functools import wraps @@ -12,28 +11,27 @@ except ImportError: # py3k from anyjson import dumps from kombu.utils.encoding import from_utf8 +from celery.five import StringIO, items from celery.task import http -from celery.tests.utils import Case, eager_tasks -from celery.utils.compat import StringIO +from celery.tests.case import AppCase, Case @contextmanager def mock_urlopen(response_method): - import urllib2 - urlopen = urllib2.urlopen + urlopen = http.urlopen @wraps(urlopen) def _mocked(url, *args, **kwargs): response_data, headers = response_method(url) return addinfourl(StringIO(response_data), headers, url) - urllib2.urlopen = _mocked + http.urlopen = _mocked try: yield True finally: - urllib2.urlopen = urlopen + http.urlopen = urlopen def _response(res): @@ -56,10 +54,10 @@ class test_encodings(Case): def test_utf8dict(self): uk = 'foobar' - d = {u'følelser ær langé': u'ærbadægzaå寨Å', + d = {'følelser ær langé': 'ærbadægzaå寨Å', from_utf8(uk): from_utf8('xuzzybaz')} - for key, value in http.utf8dict(d.items()).items(): + for key, value in items(http.utf8dict(items(d))): self.assertIsInstance(key, str) self.assertIsInstance(value, str) @@ -98,7 +96,7 @@ class test_MutableURL(Case): self.assertEqual(url.query, {'zzz': 'xxx'}) -class test_HttpDispatch(Case): +class test_HttpDispatch(AppCase): def test_dispatch_success(self): with mock_urlopen(success_response(100)): @@ -141,16 +139,20 @@ class test_HttpDispatch(Case): self.assertEqual(d.dispatch(), 100) -class test_URL(Case): +class test_URL(AppCase): def test_URL_get_async(self): - with eager_tasks(): - with mock_urlopen(success_response(100)): - d = http.URL('http://example.com/mul').get_async(x=10, y=10) - self.assertEqual(d.get(), 100) + self.app.conf.CELERY_ALWAYS_EAGER = True + with mock_urlopen(success_response(100)): + d = http.URL( + 'http://example.com/mul', app=self.app, + ).get_async(x=10, y=10) + self.assertEqual(d.get(), 100) def test_URL_post_async(self): - with eager_tasks(): - with mock_urlopen(success_response(100)): - d = http.URL('http://example.com/mul').post_async(x=10, y=10) - self.assertEqual(d.get(), 100) + self.app.conf.CELERY_ALWAYS_EAGER = True + with mock_urlopen(success_response(100)): + d = http.URL( + 'http://example.com/mul', app=self.app, + ).post_async(x=10, y=10) + self.assertEqual(d.get(), 100) diff --git 
a/awx/lib/site-packages/celery/tests/compat_modules/test_messaging.py b/awx/lib/site-packages/celery/tests/compat_modules/test_messaging.py index 8e606ceaf4..780c2f7b71 100644 --- a/awx/lib/site-packages/celery/tests/compat_modules/test_messaging.py +++ b/awx/lib/site-packages/celery/tests/compat_modules/test_messaging.py @@ -1,17 +1,11 @@ from __future__ import absolute_import from celery import messaging -from celery.tests.utils import Case +from celery.tests.case import AppCase, depends_on_current_app -class test_compat_messaging_module(Case): - - def test_with_connection(self): - - def foo(**kwargs): - pass - - self.assertTrue(messaging.with_connection(foo)) +@depends_on_current_app +class test_compat_messaging_module(AppCase): def test_get_consume_set(self): conn = messaging.establish_connection() diff --git a/awx/lib/site-packages/celery/tests/tasks/test_sets.py b/awx/lib/site-packages/celery/tests/compat_modules/test_sets.py similarity index 55% rename from awx/lib/site-packages/celery/tests/tasks/test_sets.py rename to awx/lib/site-packages/celery/tests/compat_modules/test_sets.py index 7258ae2ddc..c1d2c16fad 100644 --- a/awx/lib/site-packages/celery/tests/tasks/test_sets.py +++ b/awx/lib/site-packages/celery/tests/compat_modules/test_sets.py @@ -1,44 +1,90 @@ from __future__ import absolute_import -from __future__ import with_statement import anyjson +import warnings -from celery import current_app +from celery import uuid +from celery.result import TaskSetResult from celery.task import Task -from celery.task.sets import subtask, TaskSet from celery.canvas import Signature -from celery.tests.utils import Case +from celery.tests.tasks.test_result import make_mock_group +from celery.tests.case import AppCase, Mock, patch -class MockTask(Task): - name = 'tasks.add' +class SetsCase(AppCase): - def run(self, x, y, **kwargs): - return x + y + def setup(self): + with warnings.catch_warnings(record=True): + from celery.task import sets + self.sets = sets + self.subtask = sets.subtask + self.TaskSet = sets.TaskSet - @classmethod - def apply_async(cls, args, kwargs, **options): - return (args, kwargs, options) + class MockTask(Task): + app = self.app + name = 'tasks.add' - @classmethod - def apply(cls, args, kwargs, **options): - return (args, kwargs, options) + def run(self, x, y, **kwargs): + return x + y + + @classmethod + def apply_async(cls, args, kwargs, **options): + return (args, kwargs, options) + + @classmethod + def apply(cls, args, kwargs, **options): + return (args, kwargs, options) + self.MockTask = MockTask -class test_subtask(Case): +class test_TaskSetResult(AppCase): + + def setup(self): + self.size = 10 + self.ts = TaskSetResult(uuid(), make_mock_group(self.app, self.size)) + + def test_total(self): + self.assertEqual(self.ts.total, self.size) + + def test_compat_properties(self): + self.assertEqual(self.ts.taskset_id, self.ts.id) + self.ts.taskset_id = 'foo' + self.assertEqual(self.ts.taskset_id, 'foo') + + def test_compat_subtasks_kwarg(self): + x = TaskSetResult(uuid(), subtasks=[1, 2, 3]) + self.assertEqual(x.results, [1, 2, 3]) + + def test_itersubtasks(self): + it = self.ts.itersubtasks() + + for i, t in enumerate(it): + self.assertEqual(t.get(), i) + + +class test_App(AppCase): + + def test_TaskSet(self): + with warnings.catch_warnings(record=True): + ts = self.app.TaskSet() + self.assertListEqual(ts.tasks, []) + self.assertIs(ts.app, self.app) + + +class test_subtask(SetsCase): def test_behaves_like_type(self): - s = subtask('tasks.add', (2, 2), {'cache': True}, - 
{'routing_key': 'CPU-bound'}) - self.assertDictEqual(subtask(s), s) + s = self.subtask('tasks.add', (2, 2), {'cache': True}, + {'routing_key': 'CPU-bound'}) + self.assertDictEqual(self.subtask(s), s) def test_task_argument_can_be_task_cls(self): - s = subtask(MockTask, (2, 2)) - self.assertEqual(s.task, MockTask.name) + s = self.subtask(self.MockTask, (2, 2)) + self.assertEqual(s.task, self.MockTask.name) def test_apply_async(self): - s = MockTask.subtask( + s = self.MockTask.subtask( (2, 2), {'cache': True}, {'routing_key': 'CPU-bound'}, ) args, kwargs, options = s.apply_async() @@ -47,7 +93,7 @@ class test_subtask(Case): self.assertDictEqual(options, {'routing_key': 'CPU-bound'}) def test_delay_argmerge(self): - s = MockTask.subtask( + s = self.MockTask.subtask( (2, ), {'cache': True}, {'routing_key': 'CPU-bound'}, ) args, kwargs, options = s.delay(10, cache=False, other='foo') @@ -56,7 +102,7 @@ class test_subtask(Case): self.assertDictEqual(options, {'routing_key': 'CPU-bound'}) def test_apply_async_argmerge(self): - s = MockTask.subtask( + s = self.MockTask.subtask( (2, ), {'cache': True}, {'routing_key': 'CPU-bound'}, ) args, kwargs, options = s.apply_async((10, ), @@ -70,7 +116,7 @@ class test_subtask(Case): 'exchange': 'fast'}) def test_apply_argmerge(self): - s = MockTask.subtask( + s = self.MockTask.subtask( (2, ), {'cache': True}, {'routing_key': 'CPU-bound'}, ) args, kwargs, options = s.apply((10, ), @@ -85,52 +131,55 @@ class test_subtask(Case): ) def test_is_JSON_serializable(self): - s = MockTask.subtask( + s = self.MockTask.subtask( (2, ), {'cache': True}, {'routing_key': 'CPU-bound'}, ) s.args = list(s.args) # tuples are not preserved # but this doesn't matter. - self.assertEqual(s, subtask(anyjson.loads(anyjson.dumps(s)))) + self.assertEqual(s, self.subtask(anyjson.loads(anyjson.dumps(s)))) def test_repr(self): - s = MockTask.subtask((2, ), {'cache': True}) + s = self.MockTask.subtask((2, ), {'cache': True}) self.assertIn('2', repr(s)) self.assertIn('cache=True', repr(s)) def test_reduce(self): - s = MockTask.subtask((2, ), {'cache': True}) + s = self.MockTask.subtask((2, ), {'cache': True}) cls, args = s.__reduce__() self.assertDictEqual(dict(cls(*args)), dict(s)) -class test_TaskSet(Case): +class test_TaskSet(SetsCase): def test_task_arg_can_be_iterable__compat(self): - ts = TaskSet([MockTask.subtask((i, i)) - for i in (2, 4, 8)]) + ts = self.TaskSet([self.MockTask.subtask((i, i)) + for i in (2, 4, 8)], app=self.app) self.assertEqual(len(ts), 3) def test_respects_ALWAYS_EAGER(self): - app = current_app + app = self.app - class MockTaskSet(TaskSet): + class MockTaskSet(self.TaskSet): applied = 0 def apply(self, *args, **kwargs): self.applied += 1 ts = MockTaskSet( - [MockTask.subtask((i, i)) for i in (2, 4, 8)], + [self.MockTask.subtask((i, i)) for i in (2, 4, 8)], + app=self.app, ) app.conf.CELERY_ALWAYS_EAGER = True - try: - ts.apply_async() - finally: - app.conf.CELERY_ALWAYS_EAGER = False + ts.apply_async() self.assertEqual(ts.applied, 1) + app.conf.CELERY_ALWAYS_EAGER = False + + with patch('celery.task.sets.get_current_worker_task') as gwt: + parent = gwt.return_value = Mock() + ts.apply_async() + self.assertTrue(parent.add_trail.called) def test_apply_async(self): - applied = [0] class mocksubtask(Signature): @@ -138,8 +187,8 @@ class test_TaskSet(Case): def apply_async(self, *args, **kwargs): applied[0] += 1 - ts = TaskSet([mocksubtask(MockTask, (i, i)) - for i in (2, 4, 8)]) + ts = self.TaskSet([mocksubtask(self.MockTask, (i, i)) + for i in (2, 4, 8)], 
app=self.app) ts.apply_async() self.assertEqual(applied[0], 3) @@ -152,9 +201,10 @@ class test_TaskSet(Case): # setting current_task - @current_app.task + @self.app.task(shared=False) def xyz(): pass + from celery._state import _task_stack xyz.push_request() _task_stack.push(xyz) @@ -173,22 +223,22 @@ class test_TaskSet(Case): def apply(self, *args, **kwargs): applied[0] += 1 - ts = TaskSet([mocksubtask(MockTask, (i, i)) - for i in (2, 4, 8)]) + ts = self.TaskSet([mocksubtask(self.MockTask, (i, i)) + for i in (2, 4, 8)], app=self.app) ts.apply() self.assertEqual(applied[0], 3) def test_set_app(self): - ts = TaskSet([]) + ts = self.TaskSet([], app=self.app) ts.app = 42 self.assertEqual(ts.app, 42) def test_set_tasks(self): - ts = TaskSet([]) + ts = self.TaskSet([], app=self.app) ts.tasks = [1, 2, 3] self.assertEqual(ts, [1, 2, 3]) def test_set_Publisher(self): - ts = TaskSet([]) + ts = self.TaskSet([], app=self.app) ts.Publisher = 42 self.assertEqual(ts.Publisher, 42) diff --git a/awx/lib/site-packages/celery/tests/concurrency/test_concurrency.py b/awx/lib/site-packages/celery/tests/concurrency/test_concurrency.py index 30c675ee15..2938877416 100644 --- a/awx/lib/site-packages/celery/tests/concurrency/test_concurrency.py +++ b/awx/lib/site-packages/celery/tests/concurrency/test_concurrency.py @@ -1,25 +1,24 @@ from __future__ import absolute_import -from __future__ import with_statement import os from itertools import count from celery.concurrency.base import apply_target, BasePool -from celery.tests.utils import Case +from celery.tests.case import AppCase, Mock -class test_BasePool(Case): +class test_BasePool(AppCase): def test_apply_target(self): scratch = {} - counter = count(0).next + counter = count(0) def gen_callback(name, retval=None): def callback(*args): - scratch[name] = (counter(), args) + scratch[name] = (next(counter), args) return retval return callback @@ -86,3 +85,27 @@ class test_BasePool(Case): def test_interface_terminate_job(self): with self.assertRaises(NotImplementedError): BasePool(10).terminate_job(101) + + def test_interface_did_start_ok(self): + self.assertTrue(BasePool(10).did_start_ok()) + + def test_interface_register_with_event_loop(self): + self.assertIsNone( + BasePool(10).register_with_event_loop(Mock()), + ) + + def test_interface_on_soft_timeout(self): + self.assertIsNone(BasePool(10).on_soft_timeout(Mock())) + + def test_interface_on_hard_timeout(self): + self.assertIsNone(BasePool(10).on_hard_timeout(Mock())) + + def test_interface_close(self): + p = BasePool(10) + p.on_close = Mock() + p.close() + self.assertEqual(p._state, p.CLOSE) + p.on_close.assert_called_with() + + def test_interface_no_close(self): + self.assertIsNone(BasePool(10).on_close()) diff --git a/awx/lib/site-packages/celery/tests/concurrency/test_eventlet.py b/awx/lib/site-packages/celery/tests/concurrency/test_eventlet.py index 54ae5c8b14..162e4f2cff 100644 --- a/awx/lib/site-packages/celery/tests/concurrency/test_eventlet.py +++ b/awx/lib/site-packages/celery/tests/concurrency/test_eventlet.py @@ -1,12 +1,7 @@ from __future__ import absolute_import -from __future__ import with_statement -import os import sys -from nose import SkipTest -from mock import patch, Mock - from celery.app.defaults import is_pypy from celery.concurrency.eventlet import ( apply_target, @@ -15,13 +10,15 @@ from celery.concurrency.eventlet import ( TaskPool, ) -from celery.tests.utils import Case, mock_module, patch_many, skip_if_pypy +from celery.tests.case import ( + AppCase, Mock, SkipTest, mock_module, 
patch, patch_many, skip_if_pypy, +) -class EventletCase(Case): +class EventletCase(AppCase): @skip_if_pypy - def setUp(self): + def setup(self): if is_pypy: raise SkipTest('mock_modules not working on PyPy1.9') try: @@ -31,7 +28,7 @@ class EventletCase(Case): 'eventlet not installed, skipping related tests.') @skip_if_pypy - def tearDown(self): + def teardown(self): for mod in [mod for mod in sys.modules if mod.startswith('eventlet')]: try: del(sys.modules[mod]) @@ -42,19 +39,10 @@ class EventletCase(Case): class test_aaa_eventlet_patch(EventletCase): def test_aaa_is_patched(self): - raise SkipTest("side effects") - monkey_patched = [] - prev_monkey_patch = self.eventlet.monkey_patch - self.eventlet.monkey_patch = lambda: monkey_patched.append(True) - prev_eventlet = sys.modules.pop('celery.concurrency.eventlet', None) - os.environ.pop('EVENTLET_NOPATCH') - try: - import celery.concurrency.eventlet # noqa - self.assertTrue(monkey_patched) - finally: - sys.modules['celery.concurrency.eventlet'] = prev_eventlet - os.environ['EVENTLET_NOPATCH'] = 'yes' - self.eventlet.monkey_patch = prev_monkey_patch + with patch('eventlet.monkey_patch', create=True) as monkey_patch: + from celery import maybe_patch_concurrency + maybe_patch_concurrency(['x', '-P', 'eventlet']) + monkey_patch.assert_called_with() eventlet_modules = ( diff --git a/awx/lib/site-packages/celery/tests/concurrency/test_gevent.py b/awx/lib/site-packages/celery/tests/concurrency/test_gevent.py index a6661e23e9..baa105ba4b 100644 --- a/awx/lib/site-packages/celery/tests/concurrency/test_gevent.py +++ b/awx/lib/site-packages/celery/tests/concurrency/test_gevent.py @@ -1,19 +1,16 @@ from __future__ import absolute_import -from __future__ import with_statement - -import os -import sys - -from nose import SkipTest -from mock import Mock from celery.concurrency.gevent import ( Schedule, Timer, TaskPool, + apply_timeout, +) + +from celery.tests.case import ( + AppCase, Mock, SkipTest, mock_module, patch, patch_many, skip_if_pypy, ) -from celery.tests.utils import Case, mock_module, patch_many, skip_if_pypy gevent_modules = ( 'gevent', 'gevent.monkey', @@ -23,10 +20,10 @@ gevent_modules = ( ) -class GeventCase(Case): +class GeventCase(AppCase): @skip_if_pypy - def setUp(self): + def setup(self): try: self.gevent = __import__('gevent') except ImportError: @@ -38,24 +35,15 @@ class test_gevent_patch(GeventCase): def test_is_patched(self): with mock_module(*gevent_modules): - monkey_patched = [] - import gevent - from gevent import monkey - gevent.version_info = (1, 0, 0) - prev_monkey_patch = monkey.patch_all - monkey.patch_all = lambda: monkey_patched.append(True) - prev_gevent = sys.modules.pop('celery.concurrency.gevent', None) - os.environ.pop('GEVENT_NOPATCH') - try: - import celery.concurrency.gevent # noqa - self.assertTrue(monkey_patched) - finally: - sys.modules['celery.concurrency.gevent'] = prev_gevent - os.environ['GEVENT_NOPATCH'] = 'yes' - monkey.patch_all = prev_monkey_patch + with patch('gevent.monkey.patch_all', create=True) as patch_all: + import gevent + gevent.version_info = (1, 0, 0) + from celery import maybe_patch_concurrency + maybe_patch_concurrency(['x', '-P', 'gevent']) + self.assertTrue(patch_all.called) -class test_Schedule(Case): +class test_Schedule(AppCase): def test_sched(self): with mock_module(*gevent_modules): @@ -81,8 +69,11 @@ class test_Schedule(Case): g.kill.side_effect = KeyError() x.clear() + g = x._Greenlet() + g.cancel() -class test_TasKPool(Case): + +class test_TaskPool(AppCase): def 
test_pool(self): with mock_module(*gevent_modules): @@ -109,7 +100,7 @@ class test_TasKPool(Case): self.assertEqual(x.num_processes, 3) -class test_Timer(Case): +class test_Timer(AppCase): def test_timer(self): with mock_module(*gevent_modules): @@ -119,3 +110,37 @@ class test_Timer(Case): x.start() x.stop() x.schedule.clear.assert_called_with() + + +class test_apply_timeout(AppCase): + + def test_apply_timeout(self): + + class Timeout(Exception): + value = None + + def __init__(self, value): + self.__class__.value = value + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + pass + timeout_callback = Mock(name='timeout_callback') + apply_target = Mock(name='apply_target') + apply_timeout( + Mock(), timeout=10, callback=Mock(name='callback'), + timeout_callback=timeout_callback, + apply_target=apply_target, Timeout=Timeout, + ) + self.assertEqual(Timeout.value, 10) + self.assertTrue(apply_target.called) + + apply_target.side_effect = Timeout(10) + apply_timeout( + Mock(), timeout=10, callback=Mock(), + timeout_callback=timeout_callback, + apply_target=apply_target, Timeout=Timeout, + ) + timeout_callback.assert_called_with(False, 10) diff --git a/awx/lib/site-packages/celery/tests/concurrency/test_pool.py b/awx/lib/site-packages/celery/tests/concurrency/test_pool.py index 97441ba1ed..d1b314b527 100644 --- a/awx/lib/site-packages/celery/tests/concurrency/test_pool.py +++ b/awx/lib/site-packages/celery/tests/concurrency/test_pool.py @@ -3,10 +3,9 @@ from __future__ import absolute_import import time import itertools -from nose import SkipTest +from billiard.einfo import ExceptionInfo -from celery.datastructures import ExceptionInfo -from celery.tests.utils import Case +from celery.tests.case import AppCase, SkipTest def do_something(i): @@ -24,14 +23,14 @@ def raise_something(i): return ExceptionInfo() -class test_TaskPool(Case): +class test_TaskPool(AppCase): - def setUp(self): + def setup(self): try: __import__('multiprocessing') except ImportError: raise SkipTest('multiprocessing not supported') - from celery.concurrency.processes import TaskPool + from celery.concurrency.prefork import TaskPool self.TaskPool = TaskPool def test_attrs(self): @@ -43,10 +42,10 @@ class test_TaskPool(Case): p = self.TaskPool(2) p.start() scratchpad = {} - proc_counter = itertools.count().next + proc_counter = itertools.count() def mycallback(ret_value): - process = proc_counter() + process = next(proc_counter) scratchpad[process] = {} scratchpad[process]['ret_value'] = ret_value diff --git a/awx/lib/site-packages/celery/tests/concurrency/test_prefork.py b/awx/lib/site-packages/celery/tests/concurrency/test_prefork.py new file mode 100644 index 0000000000..7ad247436b --- /dev/null +++ b/awx/lib/site-packages/celery/tests/concurrency/test_prefork.py @@ -0,0 +1,320 @@ +from __future__ import absolute_import + +import errno +import socket +import time + +from itertools import cycle + +from celery.five import items, range +from celery.utils.functional import noop +from celery.tests.case import AppCase, Mock, SkipTest, call, patch +try: + from celery.concurrency import prefork as mp + from celery.concurrency import asynpool +except ImportError: + + class _mp(object): + RUN = 0x1 + + class TaskPool(object): + _pool = Mock() + + def __init__(self, *args, **kwargs): + pass + + def start(self): + pass + + def stop(self): + pass + + def apply_async(self, *args, **kwargs): + pass + mp = _mp() # noqa + asynpool = None # noqa + + +class Object(object): # for writeable attributes. 
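+    # Plain attribute bag: the mock pools below hand these out as
+    # stand-ins for pool processes (pid, inqW_fd, outqR_fd, ...).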
+ + def __init__(self, **kwargs): + [setattr(self, k, v) for k, v in items(kwargs)] + + +class MockResult(object): + + def __init__(self, value, pid): + self.value = value + self.pid = pid + + def worker_pids(self): + return [self.pid] + + def get(self): + return self.value + + +class MockPool(object): + started = False + closed = False + joined = False + terminated = False + _state = None + + def __init__(self, *args, **kwargs): + self.started = True + self._timeout_handler = Mock() + self._result_handler = Mock() + self.maintain_pool = Mock() + self._state = mp.RUN + self._processes = kwargs.get('processes') + self._pool = [Object(pid=i, inqW_fd=1, outqR_fd=2) + for i in range(self._processes)] + self._current_proc = cycle(range(self._processes)) + + def close(self): + self.closed = True + self._state = 'CLOSE' + + def join(self): + self.joined = True + + def terminate(self): + self.terminated = True + + def terminate_job(self, *args, **kwargs): + pass + + def restart(self, *args, **kwargs): + pass + + def handle_result_event(self, *args, **kwargs): + pass + + def flush(self): + pass + + def grow(self, n=1): + self._processes += n + + def shrink(self, n=1): + self._processes -= n + + def apply_async(self, *args, **kwargs): + pass + + def register_with_event_loop(self, loop): + pass + + +class ExeMockPool(MockPool): + + def apply_async(self, target, args=(), kwargs={}, callback=noop): + from threading import Timer + res = target(*args, **kwargs) + Timer(0.1, callback, (res, )).start() + return MockResult(res, next(self._current_proc)) + + +class TaskPool(mp.TaskPool): + Pool = BlockingPool = MockPool + + +class ExeMockTaskPool(mp.TaskPool): + Pool = BlockingPool = ExeMockPool + + +class PoolCase(AppCase): + + def setup(self): + try: + import multiprocessing # noqa + except ImportError: + raise SkipTest('multiprocessing not supported') + + +class test_AsynPool(PoolCase): + + def test_gen_not_started(self): + + def gen(): + yield 1 + yield 2 + g = gen() + self.assertTrue(asynpool.gen_not_started(g)) + next(g) + self.assertFalse(asynpool.gen_not_started(g)) + list(g) + self.assertFalse(asynpool.gen_not_started(g)) + + def test_select(self): + ebadf = socket.error() + ebadf.errno = errno.EBADF + with patch('select.select') as select: + select.return_value = ([3], [], []) + self.assertEqual( + asynpool._select(set([3])), + ([3], [], 0), + ) + + select.return_value = ([], [], [3]) + self.assertEqual( + asynpool._select(set([3]), None, set([3])), + ([3], [], 0), + ) + + eintr = socket.error() + eintr.errno = errno.EINTR + select.side_effect = eintr + + readers = set([3]) + self.assertEqual(asynpool._select(readers), ([], [], 1)) + self.assertIn(3, readers) + + with patch('select.select') as select: + select.side_effect = ebadf + readers = set([3]) + self.assertEqual(asynpool._select(readers), ([], [], 1)) + select.assert_has_calls([call([3], [], [], 0)]) + self.assertNotIn(3, readers) + + with patch('select.select') as select: + select.side_effect = MemoryError() + with self.assertRaises(MemoryError): + asynpool._select(set([1])) + + with patch('select.select') as select: + + def se(*args): + select.side_effect = MemoryError() + raise ebadf + select.side_effect = se + with self.assertRaises(MemoryError): + asynpool._select(set([3])) + + with patch('select.select') as select: + + def se2(*args): + select.side_effect = socket.error() + select.side_effect.errno = 1321 + raise ebadf + select.side_effect = se2 + with self.assertRaises(socket.error): + asynpool._select(set([3])) + + with 
patch('select.select') as select: + + select.side_effect = socket.error() + select.side_effect.errno = 34134 + with self.assertRaises(socket.error): + asynpool._select(set([3])) + + def test_promise(self): + fun = Mock() + x = asynpool.promise(fun, (1, ), {'foo': 1}) + x() + self.assertTrue(x.ready) + fun.assert_called_with(1, foo=1) + + def test_Worker(self): + w = asynpool.Worker(Mock(), Mock()) + w.on_loop_start(1234) + w.outq.put.assert_called_with((asynpool.WORKER_UP, (1234, ))) + + +class test_ResultHandler(PoolCase): + + def test_process_result(self): + x = asynpool.ResultHandler( + Mock(), Mock(), {}, Mock(), + Mock(), Mock(), Mock(), Mock(), + fileno_to_outq={}, + on_process_alive=Mock(), + on_job_ready=Mock(), + ) + self.assertTrue(x) + hub = Mock(name='hub') + recv = x._recv_message = Mock(name='recv_message') + recv.return_value = iter([]) + x.on_state_change = Mock() + x.register_with_event_loop(hub) + proc = x.fileno_to_outq[3] = Mock() + reader = proc.outq._reader + reader.poll.return_value = False + x.handle_event(6) # KeyError + x.handle_event(3) + x._recv_message.assert_called_with( + hub.add_reader, 3, x.on_state_change, + ) + + +class test_TaskPool(PoolCase): + + def test_start(self): + pool = TaskPool(10) + pool.start() + self.assertTrue(pool._pool.started) + self.assertTrue(pool._pool._state == asynpool.RUN) + + _pool = pool._pool + pool.stop() + self.assertTrue(_pool.closed) + self.assertTrue(_pool.joined) + pool.stop() + + pool.start() + _pool = pool._pool + pool.terminate() + pool.terminate() + self.assertTrue(_pool.terminated) + + def test_apply_async(self): + pool = TaskPool(10) + pool.start() + pool.apply_async(lambda x: x, (2, ), {}) + + def test_grow_shrink(self): + pool = TaskPool(10) + pool.start() + self.assertEqual(pool._pool._processes, 10) + pool.grow() + self.assertEqual(pool._pool._processes, 11) + pool.shrink(2) + self.assertEqual(pool._pool._processes, 9) + + def test_info(self): + pool = TaskPool(10) + procs = [Object(pid=i) for i in range(pool.limit)] + + class _Pool(object): + _pool = procs + _maxtasksperchild = None + timeout = 10 + soft_timeout = 5 + + def human_write_stats(self, *args, **kwargs): + return {} + pool._pool = _Pool() + info = pool.info + self.assertEqual(info['max-concurrency'], pool.limit) + self.assertEqual(info['max-tasks-per-child'], 'N/A') + self.assertEqual(info['timeouts'], (5, 10)) + + def test_num_processes(self): + pool = TaskPool(7) + pool.start() + self.assertEqual(pool.num_processes, 7) + + def test_restart(self): + raise SkipTest('functional test') + + def get_pids(pool): + return set([p.pid for p in pool._pool._pool]) + + tp = self.TaskPool(5) + time.sleep(0.5) + tp.start() + pids = get_pids(tp) + tp.restart() + time.sleep(0.5) + self.assertEqual(pids, get_pids(tp)) diff --git a/awx/lib/site-packages/celery/tests/concurrency/test_processes.py b/awx/lib/site-packages/celery/tests/concurrency/test_processes.py deleted file mode 100644 index fb35bc70eb..0000000000 --- a/awx/lib/site-packages/celery/tests/concurrency/test_processes.py +++ /dev/null @@ -1,191 +0,0 @@ -from __future__ import absolute_import -from __future__ import with_statement - -import time - -from itertools import cycle - -from mock import Mock -from nose import SkipTest - -from celery.utils.functional import noop -from celery.tests.utils import Case -try: - from celery.concurrency import processes as mp -except ImportError: - - class _mp(object): - RUN = 0x1 - - class TaskPool(object): - _pool = Mock() - - def __init__(self, *args, **kwargs): - pass 
- - def start(self): - pass - - def stop(self): - pass - - def apply_async(self, *args, **kwargs): - pass - mp = _mp() # noqa - - -class Object(object): # for writeable attributes. - - def __init__(self, **kwargs): - [setattr(self, k, v) for k, v in kwargs.items()] - - -class MockResult(object): - - def __init__(self, value, pid): - self.value = value - self.pid = pid - - def worker_pids(self): - return [self.pid] - - def get(self): - return self.value - - -class MockPool(object): - started = False - closed = False - joined = False - terminated = False - _state = None - - def __init__(self, *args, **kwargs): - self.started = True - self._timeout_handler = Mock() - self._result_handler = Mock() - self.maintain_pool = Mock() - self._state = mp.RUN - self._processes = kwargs.get('processes') - self._pool = [Object(pid=i) for i in range(self._processes)] - self._current_proc = cycle(xrange(self._processes)).next - - def close(self): - self.closed = True - self._state = 'CLOSE' - - def join(self): - self.joined = True - - def terminate(self): - self.terminated = True - - def grow(self, n=1): - self._processes += n - - def shrink(self, n=1): - self._processes -= n - - def apply_async(self, *args, **kwargs): - pass - - -class ExeMockPool(MockPool): - - def apply_async(self, target, args=(), kwargs={}, callback=noop): - from threading import Timer - res = target(*args, **kwargs) - Timer(0.1, callback, (res, )).start() - return MockResult(res, self._current_proc()) - - -class TaskPool(mp.TaskPool): - Pool = MockPool - - -class ExeMockTaskPool(mp.TaskPool): - Pool = ExeMockPool - - -class test_TaskPool(Case): - - def setUp(self): - try: - import multiprocessing # noqa - except ImportError: - raise SkipTest('multiprocessing not supported') - - def test_start(self): - pool = TaskPool(10) - pool.start() - self.assertTrue(pool._pool.started) - self.assertTrue(pool._pool._state == mp.RUN) - - _pool = pool._pool - pool.stop() - self.assertTrue(_pool.closed) - self.assertTrue(_pool.joined) - pool.stop() - - pool.start() - _pool = pool._pool - pool.terminate() - pool.terminate() - self.assertTrue(_pool.terminated) - - def test_apply_async(self): - pool = TaskPool(10) - pool.start() - pool.apply_async(lambda x: x, (2, ), {}) - - def test_terminate_job(self): - pool = TaskPool(10) - pool._pool = Mock() - pool.terminate_job(1341) - pool._pool.terminate_job.assert_called_with(1341, None) - - def test_grow_shrink(self): - pool = TaskPool(10) - pool.start() - self.assertEqual(pool._pool._processes, 10) - pool.grow() - self.assertEqual(pool._pool._processes, 11) - pool.shrink(2) - self.assertEqual(pool._pool._processes, 9) - - def test_info(self): - pool = TaskPool(10) - procs = [Object(pid=i) for i in range(pool.limit)] - pool._pool = Object(_pool=procs, - _maxtasksperchild=None, - timeout=10, - soft_timeout=5) - info = pool.info - self.assertEqual(info['max-concurrency'], pool.limit) - self.assertIsNone(info['max-tasks-per-child']) - self.assertEqual(info['timeouts'], (5, 10)) - - def test_num_processes(self): - pool = TaskPool(7) - pool.start() - self.assertEqual(pool.num_processes, 7) - - def test_restart_pool(self): - pool = TaskPool() - pool._pool = Mock() - pool.restart() - pool._pool.restart.assert_called_with() - - def test_restart(self): - raise SkipTest('functional test') - - def get_pids(pool): - return set([p.pid for p in pool._pool._pool]) - - tp = self.TaskPool(5) - time.sleep(0.5) - tp.start() - pids = get_pids(tp) - tp.restart() - time.sleep(0.5) - self.assertEqual(pids, get_pids(tp)) diff --git 
a/awx/lib/site-packages/celery/tests/concurrency/test_solo.py b/awx/lib/site-packages/celery/tests/concurrency/test_solo.py index ba420b60a5..f701c6c642 100644 --- a/awx/lib/site-packages/celery/tests/concurrency/test_solo.py +++ b/awx/lib/site-packages/celery/tests/concurrency/test_solo.py @@ -4,10 +4,10 @@ import operator from celery.concurrency import solo from celery.utils.functional import noop -from celery.tests.utils import Case +from celery.tests.case import AppCase -class test_solo_TaskPool(Case): +class test_solo_TaskPool(AppCase): def test_on_start(self): x = solo.TaskPool() diff --git a/awx/lib/site-packages/celery/tests/concurrency/test_threads.py b/awx/lib/site-packages/celery/tests/concurrency/test_threads.py index 4443c52946..2eb5e3882f 100644 --- a/awx/lib/site-packages/celery/tests/concurrency/test_threads.py +++ b/awx/lib/site-packages/celery/tests/concurrency/test_threads.py @@ -1,11 +1,8 @@ from __future__ import absolute_import -from __future__ import with_statement - -from mock import Mock from celery.concurrency.threads import NullDict, TaskPool, apply_target -from celery.tests.utils import Case, mask_modules, mock_module +from celery.tests.case import AppCase, Case, Mock, mask_modules, mock_module class test_NullDict(Case): @@ -17,7 +14,7 @@ class test_NullDict(Case): x['foo'] -class test_TaskPool(Case): +class test_TaskPool(AppCase): def test_without_threadpool(self): diff --git a/awx/lib/site-packages/celery/tests/config.py b/awx/lib/site-packages/celery/tests/config.py deleted file mode 100644 index c874674232..0000000000 --- a/awx/lib/site-packages/celery/tests/config.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import absolute_import - -import os - -from kombu import Queue - -BROKER_URL = 'memory://' - -#: warn if config module not found -os.environ['C_WNOCONF'] = 'yes' - -#: Don't want log output when running suite. 
-CELERYD_HIJACK_ROOT_LOGGER = False - -CELERY_RESULT_BACKEND = 'cache' -CELERY_CACHE_BACKEND = 'memory' -CELERY_RESULT_DBURI = 'sqlite:///test.db' -CELERY_SEND_TASK_ERROR_EMAILS = False - -CELERY_DEFAULT_QUEUE = 'testcelery' -CELERY_DEFAULT_EXCHANGE = 'testcelery' -CELERY_DEFAULT_ROUTING_KEY = 'testcelery' -CELERY_QUEUES = ( - Queue('testcelery', routing_key='testcelery'), -) - -CELERY_ENABLE_UTC = True -CELERY_TIMEZONE = 'UTC' - -CELERYD_LOG_COLOR = False - -# Tyrant results tests (only executed if installed and running) -TT_HOST = os.environ.get('TT_HOST') or 'localhost' -TT_PORT = int(os.environ.get('TT_PORT') or 1978) - -# Redis results tests (only executed if installed and running) -CELERY_REDIS_HOST = os.environ.get('REDIS_HOST') or 'localhost' -CELERY_REDIS_PORT = int(os.environ.get('REDIS_PORT') or 6379) -CELERY_REDIS_DB = os.environ.get('REDIS_DB') or 0 -CELERY_REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD') - -# Mongo results tests (only executed if installed and running) -CELERY_MONGODB_BACKEND_SETTINGS = { - 'host': os.environ.get('MONGO_HOST') or 'localhost', - 'port': os.environ.get('MONGO_PORT') or 27017, - 'database': os.environ.get('MONGO_DB') or 'celery_unittests', - 'taskmeta_collection': (os.environ.get('MONGO_TASKMETA_COLLECTION') - or 'taskmeta_collection'), -} -if os.environ.get('MONGO_USER'): - CELERY_MONGODB_BACKEND_SETTINGS['user'] = os.environ.get('MONGO_USER') -if os.environ.get('MONGO_PASSWORD'): - CELERY_MONGODB_BACKEND_SETTINGS['password'] = \ - os.environ.get('MONGO_PASSWORD') diff --git a/awx/lib/site-packages/celery/tests/contrib/test_abortable.py b/awx/lib/site-packages/celery/tests/contrib/test_abortable.py index a72f645347..4bc2df77b9 100644 --- a/awx/lib/site-packages/celery/tests/contrib/test_abortable.py +++ b/awx/lib/site-packages/celery/tests/contrib/test_abortable.py @@ -1,51 +1,49 @@ from __future__ import absolute_import from celery.contrib.abortable import AbortableTask, AbortableAsyncResult -from celery.result import AsyncResult -from celery.tests.utils import Case +from celery.tests.case import AppCase -class MyAbortableTask(AbortableTask): +class test_AbortableTask(AppCase): - def run(self, **kwargs): - return True + def setup(self): - -class test_AbortableTask(Case): + @self.app.task(base=AbortableTask, shared=False) + def abortable(): + return True + self.abortable = abortable def test_async_result_is_abortable(self): - t = MyAbortableTask() - result = t.apply_async() + result = self.abortable.apply_async() tid = result.id - self.assertIsInstance(t.AsyncResult(tid), AbortableAsyncResult) + self.assertIsInstance( + self.abortable.AsyncResult(tid), AbortableAsyncResult, + ) def test_is_not_aborted(self): - t = MyAbortableTask() - t.push_request() + self.abortable.push_request() try: - result = t.apply_async() + result = self.abortable.apply_async() tid = result.id - self.assertFalse(t.is_aborted(task_id=tid)) + self.assertFalse(self.abortable.is_aborted(task_id=tid)) finally: - t.pop_request() + self.abortable.pop_request() def test_is_aborted_not_abort_result(self): - t = MyAbortableTask() - t.AsyncResult = AsyncResult - t.push_request() + self.abortable.AsyncResult = self.app.AsyncResult + self.abortable.push_request() try: - t.request.id = 'foo' - self.assertFalse(t.is_aborted()) + self.abortable.request.id = 'foo' + self.assertFalse(self.abortable.is_aborted()) finally: - t.pop_request() + self.abortable.pop_request() def test_abort_yields_aborted(self): - t = MyAbortableTask() - t.push_request() + self.abortable.push_request() try: - 
result = t.apply_async() + result = self.abortable.apply_async() result.abort() tid = result.id - self.assertTrue(t.is_aborted(task_id=tid)) + self.assertTrue(self.abortable.is_aborted(task_id=tid)) finally: - t.pop_request() + self.abortable.pop_request() diff --git a/awx/lib/site-packages/celery/tests/contrib/test_methods.py b/awx/lib/site-packages/celery/tests/contrib/test_methods.py new file mode 100644 index 0000000000..da74cc98b1 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/contrib/test_methods.py @@ -0,0 +1,34 @@ +from __future__ import absolute_import + +from celery.contrib.methods import task_method, task + +from celery.tests.case import AppCase, patch + + +class test_task_method(AppCase): + + def test_task_method(self): + + class X(object): + + def __init__(self): + self.state = 0 + + @self.app.task(shared=False, filter=task_method) + def add(self, x): + self.state += x + + x = X() + x.add(2) + self.assertEqual(x.state, 2) + x.add(4) + self.assertEqual(x.state, 6) + + self.assertTrue(X.add) + self.assertIs(x.add.__self__, x) + + def test_task(self): + with patch('celery.contrib.methods.current_app') as curapp: + fun = object() + task(fun, x=1) + curapp.task.assert_called_with(fun, x=1, filter=task_method) diff --git a/awx/lib/site-packages/celery/tests/contrib/test_migrate.py b/awx/lib/site-packages/celery/tests/contrib/test_migrate.py index ce9ead6af9..fbd80a536c 100644 --- a/awx/lib/site-packages/celery/tests/contrib/test_migrate.py +++ b/awx/lib/site-packages/celery/tests/contrib/test_migrate.py @@ -1,17 +1,35 @@ -from __future__ import absolute_import -from __future__ import with_statement +from __future__ import absolute_import, unicode_literals + +from contextlib import contextmanager + +from amqp import ChannelError from kombu import Connection, Producer, Queue, Exchange -from kombu.exceptions import StdChannelError -from mock import patch + +from kombu.transport.virtual import QoS from celery.contrib.migrate import ( + StopFiltering, State, migrate_task, migrate_tasks, + filter_callback, + _maybe_queue, + filter_status, + move_by_taskmap, + move_by_idmap, + move_task_by_id, + start_filter, + task_id_in, + task_id_eq, + expand_dest, + move, ) from celery.utils.encoding import bytes_t, ensure_bytes -from celery.tests.utils import AppCase, Case, Mock +from celery.tests.case import AppCase, Mock, override_stdouts, patch + +# hack to ignore error at shutdown +QoS.restore_at_shutdown = False def Message(body, exchange='exchange', routing_key='rkey', @@ -34,16 +52,198 @@ def Message(body, exchange='exchange', routing_key='rkey', ) -class test_State(Case): +class test_State(AppCase): def test_strtotal(self): x = State() - self.assertEqual(x.strtotal, u'?') + self.assertEqual(x.strtotal, '?') x.total_apx = 100 - self.assertEqual(x.strtotal, u'100') + self.assertEqual(x.strtotal, '100') + + def test_repr(self): + x = State() + self.assertTrue(repr(x)) + x.filtered = 'foo' + self.assertTrue(repr(x)) -class test_migrate_task(Case): +class test_move(AppCase): + + @contextmanager + def move_context(self, **kwargs): + with patch('celery.contrib.migrate.start_filter') as start: + with patch('celery.contrib.migrate.republish') as republish: + pred = Mock(name='predicate') + move(pred, app=self.app, + connection=self.app.connection(), **kwargs) + self.assertTrue(start.called) + callback = start.call_args[0][2] + yield callback, pred, republish + + def msgpair(self, **kwargs): + body = dict({'task': 'add', 'id': 'id'}, **kwargs) + return body, Message(body) + + def 
test_move(self): + with self.move_context() as (callback, pred, republish): + pred.return_value = None + body, message = self.msgpair() + callback(body, message) + self.assertFalse(message.ack.called) + self.assertFalse(republish.called) + + pred.return_value = 'foo' + callback(body, message) + message.ack.assert_called_with() + self.assertTrue(republish.called) + + def test_move_transform(self): + trans = Mock(name='transform') + trans.return_value = Queue('bar') + with self.move_context(transform=trans) as (callback, pred, republish): + pred.return_value = 'foo' + body, message = self.msgpair() + with patch('celery.contrib.migrate.maybe_declare') as maybed: + callback(body, message) + trans.assert_called_with('foo') + self.assertTrue(maybed.called) + self.assertTrue(republish.called) + + def test_limit(self): + with self.move_context(limit=1) as (callback, pred, republish): + pred.return_value = 'foo' + body, message = self.msgpair() + with self.assertRaises(StopFiltering): + callback(body, message) + self.assertTrue(republish.called) + + def test_callback(self): + cb = Mock() + with self.move_context(callback=cb) as (callback, pred, republish): + pred.return_value = 'foo' + body, message = self.msgpair() + callback(body, message) + self.assertTrue(republish.called) + self.assertTrue(cb.called) + + +class test_start_filter(AppCase): + + def test_start(self): + with patch('celery.contrib.migrate.eventloop') as evloop: + app = Mock() + filt = Mock(name='filter') + conn = Connection('memory://') + evloop.side_effect = StopFiltering() + app.amqp.queues = {'foo': Queue('foo'), 'bar': Queue('bar')} + consumer = app.amqp.TaskConsumer.return_value = Mock(name='consum') + consumer.queues = list(app.amqp.queues.values()) + consumer.channel = conn.default_channel + consumer.__enter__ = Mock(name='consumer.__enter__') + consumer.__exit__ = Mock(name='consumer.__exit__') + consumer.callbacks = [] + + def register_callback(x): + consumer.callbacks.append(x) + consumer.register_callback = register_callback + + start_filter(app, conn, filt, + queues='foo,bar', ack_messages=True) + body = {'task': 'add', 'id': 'id'} + for callback in consumer.callbacks: + callback(body, Message(body)) + consumer.callbacks[:] = [] + cb = Mock(name='callback=') + start_filter(app, conn, filt, tasks='add,mul', callback=cb) + for callback in consumer.callbacks: + callback(body, Message(body)) + self.assertTrue(cb.called) + + on_declare_queue = Mock() + start_filter(app, conn, filt, tasks='add,mul', queues='foo', + on_declare_queue=on_declare_queue) + self.assertTrue(on_declare_queue.called) + start_filter(app, conn, filt, queues=['foo', 'bar']) + consumer.callbacks[:] = [] + state = State() + start_filter(app, conn, filt, + tasks='add,mul', callback=cb, state=state, limit=1) + stop_filtering_raised = False + for callback in consumer.callbacks: + try: + callback(body, Message(body)) + except StopFiltering: + stop_filtering_raised = True + self.assertTrue(state.count) + self.assertTrue(stop_filtering_raised) + + +class test_filter_callback(AppCase): + + def test_filter(self): + callback = Mock() + filt = filter_callback(callback, ['add', 'mul']) + t1 = {'task': 'add'} + t2 = {'task': 'div'} + + message = Mock() + filt(t2, message) + self.assertFalse(callback.called) + filt(t1, message) + callback.assert_called_with(t1, message) + + +class test_utils(AppCase): + + def test_task_id_in(self): + self.assertTrue(task_id_in(['A'], {'id': 'A'}, Mock())) + self.assertFalse(task_id_in(['A'], {'id': 'B'}, Mock())) + + def 
test_task_id_eq(self): + self.assertTrue(task_id_eq('A', {'id': 'A'}, Mock())) + self.assertFalse(task_id_eq('A', {'id': 'B'}, Mock())) + + def test_expand_dest(self): + self.assertEqual(expand_dest(None, 'foo', 'bar'), ('foo', 'bar')) + self.assertEqual(expand_dest(('b', 'x'), 'foo', 'bar'), ('b', 'x')) + + def test_maybe_queue(self): + app = Mock() + app.amqp.queues = {'foo': 313} + self.assertEqual(_maybe_queue(app, 'foo'), 313) + self.assertEqual(_maybe_queue(app, Queue('foo')), Queue('foo')) + + def test_filter_status(self): + with override_stdouts() as (stdout, stderr): + filter_status(State(), {'id': '1', 'task': 'add'}, Mock()) + self.assertTrue(stdout.getvalue()) + + def test_move_by_taskmap(self): + with patch('celery.contrib.migrate.move') as move: + move_by_taskmap({'add': Queue('foo')}) + self.assertTrue(move.called) + cb = move.call_args[0][0] + self.assertTrue(cb({'task': 'add'}, Mock())) + + def test_move_by_idmap(self): + with patch('celery.contrib.migrate.move') as move: + move_by_idmap({'123f': Queue('foo')}) + self.assertTrue(move.called) + cb = move.call_args[0][0] + self.assertTrue(cb({'id': '123f'}, Mock())) + + def test_move_task_by_id(self): + with patch('celery.contrib.migrate.move') as move: + move_task_by_id('123f', Queue('foo')) + self.assertTrue(move.called) + cb = move.call_args[0][0] + self.assertEqual( + cb({'id': '123f'}, Mock()), + Queue('foo'), + ) + + +class test_migrate_task(AppCase): def test_removes_compression_header(self): x = Message('foo', compression='zlib') @@ -78,7 +278,7 @@ class test_migrate_tasks(AppCase): self.assertTrue(x.default_channel.queues) self.assertFalse(y.default_channel.queues) - migrate_tasks(x, y) + migrate_tasks(x, y, accept=['text/plain'], app=self.app) yq = q(y.default_channel) self.assertEqual(yq.get().body, ensure_bytes('foo')) @@ -87,25 +287,28 @@ class test_migrate_tasks(AppCase): Producer(x).publish('foo', exchange=name, routing_key=name) callback = Mock() - migrate_tasks(x, y, callback=callback) + migrate_tasks(x, y, + callback=callback, accept=['text/plain'], app=self.app) self.assertTrue(callback.called) migrate = Mock() Producer(x).publish('baz', exchange=name, routing_key=name) - migrate_tasks(x, y, callback=callback, migrate=migrate) + migrate_tasks(x, y, callback=callback, + migrate=migrate, accept=['text/plain'], app=self.app) self.assertTrue(migrate.called) with patch('kombu.transport.virtual.Channel.queue_declare') as qd: def effect(*args, **kwargs): if kwargs.get('passive'): - raise StdChannelError() + raise ChannelError('some channel error') return 0, 3, 0 qd.side_effect = effect - migrate_tasks(x, y) + migrate_tasks(x, y, app=self.app) x = Connection('memory://') x.default_channel.queues = {} y.default_channel.queues = {} callback = Mock() - migrate_tasks(x, y, callback=callback) + migrate_tasks(x, y, + callback=callback, accept=['text/plain'], app=self.app) self.assertFalse(callback.called) diff --git a/awx/lib/site-packages/celery/tests/contrib/test_rdb.py b/awx/lib/site-packages/celery/tests/contrib/test_rdb.py index ff50f03933..a933c60101 100644 --- a/awx/lib/site-packages/celery/tests/contrib/test_rdb.py +++ b/awx/lib/site-packages/celery/tests/contrib/test_rdb.py @@ -1,17 +1,18 @@ from __future__ import absolute_import -from __future__ import with_statement import errno import socket -from mock import Mock, patch - from celery.contrib.rdb import ( Rdb, debugger, set_trace, ) -from celery.tests.utils import Case, WhateverIO, skip_if_pypy +from celery.tests.case import Case, Mock, WhateverIO, patch, 
skip_if_pypy + + +class SockErr(socket.error): + errno = None class test_Rdb(Case): @@ -50,11 +51,11 @@ class test_Rdb(Case): with patch('celery.contrib.rdb._frame'): rdb.set_trace() rdb.set_trace(Mock()) - pset.side_effect = socket.error + pset.side_effect = SockErr pset.side_effect.errno = errno.ECONNRESET rdb.set_trace() pset.side_effect.errno = errno.ENOENT - with self.assertRaises(socket.error): + with self.assertRaises(SockErr): rdb.set_trace() # _close_session @@ -81,9 +82,9 @@ class test_Rdb(Case): curproc.return_value.name = 'PoolWorker-10' Rdb(out=out) - err = sock.return_value.bind.side_effect = socket.error() + err = sock.return_value.bind.side_effect = SockErr() err.errno = errno.ENOENT - with self.assertRaises(socket.error): + with self.assertRaises(SockErr): Rdb(out=out) err.errno = errno.EADDRINUSE with self.assertRaises(Exception): diff --git a/awx/lib/site-packages/celery/tests/events/test_cursesmon.py b/awx/lib/site-packages/celery/tests/events/test_cursesmon.py index e242fed951..c8e615167c 100644 --- a/awx/lib/site-packages/celery/tests/events/test_cursesmon.py +++ b/awx/lib/site-packages/celery/tests/events/test_cursesmon.py @@ -1,8 +1,6 @@ from __future__ import absolute_import -from nose import SkipTest - -from celery.tests.utils import Case +from celery.tests.case import AppCase, SkipTest class MockWindow(object): @@ -11,16 +9,16 @@ class MockWindow(object): return self.y, self.x -class test_CursesDisplay(Case): +class test_CursesDisplay(AppCase): - def setUp(self): + def setup(self): try: import curses # noqa except ImportError: raise SkipTest('curses monitor requires curses') from celery.events import cursesmon - self.monitor = cursesmon.CursesMonitor(object()) + self.monitor = cursesmon.CursesMonitor(object(), app=self.app) self.win = MockWindow() self.monitor.win = self.win diff --git a/awx/lib/site-packages/celery/tests/events/test_events.py b/awx/lib/site-packages/celery/tests/events/test_events.py index 332fc60bab..791f4167ed 100644 --- a/awx/lib/site-packages/celery/tests/events/test_events.py +++ b/awx/lib/site-packages/celery/tests/events/test_events.py @@ -1,13 +1,9 @@ from __future__ import absolute_import -from __future__ import with_statement import socket -from mock import Mock - -from celery import Celery -from celery import events -from celery.tests.utils import AppCase +from celery.events import Event +from celery.tests.case import AppCase, Mock class MockProducer(object): @@ -34,7 +30,7 @@ class MockProducer(object): class test_Event(AppCase): def test_constructor(self): - event = events.Event('world war II') + event = Event('world war II') self.assertEqual(event['type'], 'world war II') self.assertTrue(event['timestamp']) @@ -42,21 +38,29 @@ class test_Event(AppCase): class test_EventDispatcher(AppCase): def test_redis_uses_fanout_exchange(self): - with Celery(set_as_current=False) as app: - app.connection = Mock() - conn = app.connection.return_value = Mock() - conn.transport.driver_type = 'redis' + self.app.connection = Mock() + conn = self.app.connection.return_value = Mock() + conn.transport.driver_type = 'redis' - dispatcher = app.events.Dispatcher(conn, enabled=False) - self.assertEqual(dispatcher.exchange.type, 'fanout') + dispatcher = self.app.events.Dispatcher(conn, enabled=False) + self.assertEqual(dispatcher.exchange.type, 'fanout') def test_others_use_topic_exchange(self): - with Celery(set_as_current=False) as app: - app.connection = Mock() - conn = app.connection.return_value = Mock() - conn.transport.driver_type = 'amqp' - 
dispatcher = app.events.Dispatcher(conn, enabled=False) - self.assertEqual(dispatcher.exchange.type, 'topic') + self.app.connection = Mock() + conn = self.app.connection.return_value = Mock() + conn.transport.driver_type = 'amqp' + dispatcher = self.app.events.Dispatcher(conn, enabled=False) + self.assertEqual(dispatcher.exchange.type, 'topic') + + def test_takes_channel_connection(self): + x = self.app.events.Dispatcher(channel=Mock()) + self.assertIs(x.connection, x.channel.connection.client) + + def test_sql_transports_disabled(self): + conn = Mock() + conn.transport.driver_type = 'sql' + x = self.app.events.Dispatcher(connection=conn) + self.assertFalse(x.enabled) def test_send(self): producer = MockProducer() @@ -164,9 +168,11 @@ class test_EventReceiver(AppCase): connection = Mock() connection.transport_cls = 'memory' - r = events.EventReceiver(connection, - handlers={'world-war': my_handler}, - node_id='celery.tests') + r = self.app.events.Receiver( + connection, + handlers={'world-war': my_handler}, + node_id='celery.tests', + ) r._receive(message, object()) self.assertTrue(got_event[0]) @@ -181,31 +187,38 @@ class test_EventReceiver(AppCase): connection = Mock() connection.transport_cls = 'memory' - r = events.EventReceiver(connection, node_id='celery.tests') - events.EventReceiver.handlers['*'] = my_handler - try: - r._receive(message, object()) - self.assertTrue(got_event[0]) - finally: - events.EventReceiver.handlers = {} + r = self.app.events.Receiver(connection, node_id='celery.tests') + r.handlers['*'] = my_handler + r._receive(message, object()) + self.assertTrue(got_event[0]) def test_itercapture(self): connection = self.app.connection() try: r = self.app.events.Receiver(connection, node_id='celery.tests') it = r.itercapture(timeout=0.0001, wakeup=False) - consumer = it.next() - self.assertTrue(consumer.queues) - self.assertEqual(consumer.callbacks[0], r._receive) with self.assertRaises(socket.timeout): - it.next() + next(it) with self.assertRaises(socket.timeout): r.capture(timeout=0.00001) finally: connection.close() + def test_event_from_message_localize_disabled(self): + r = self.app.events.Receiver(Mock(), node_id='celery.tests') + r.adjust_clock = Mock() + ts_adjust = Mock() + + r.event_from_message( + {'type': 'worker-online', 'clock': 313}, + localize=False, + adjust_timestamp=ts_adjust, + ) + self.assertFalse(ts_adjust.called) + r.adjust_clock.assert_called_with(313) + def test_itercapture_limit(self): connection = self.app.connection() channel = connection.channel() @@ -215,17 +228,19 @@ class test_EventReceiver(AppCase): def handler(event): events_received[0] += 1 - producer = self.app.events.Dispatcher(connection, - enabled=True, - channel=channel) - r = self.app.events.Receiver(connection, - handlers={'*': handler}, - node_id='celery.tests') + producer = self.app.events.Dispatcher( + connection, enabled=True, channel=channel, + ) + r = self.app.events.Receiver( + connection, + handlers={'*': handler}, + node_id='celery.tests', + ) evs = ['ev1', 'ev2', 'ev3', 'ev4', 'ev5'] for ev in evs: producer.send(ev) it = r.itercapture(limit=4, wakeup=True) - it.next() # skip consumer (see itercapture) + next(it) # skip consumer (see itercapture) list(it) self.assertEqual(events_received[0], 4) finally: diff --git a/awx/lib/site-packages/celery/tests/events/test_snapshot.py b/awx/lib/site-packages/celery/tests/events/test_snapshot.py index ecbe77213e..f551751d6a 100644 --- a/awx/lib/site-packages/celery/tests/events/test_snapshot.py +++ 
b/awx/lib/site-packages/celery/tests/events/test_snapshot.py @@ -1,12 +1,8 @@ from __future__ import absolute_import -from __future__ import with_statement -from mock import patch - -from celery.app import app_or_default from celery.events import Events from celery.events.snapshot import Polaroid, evcam -from celery.tests.utils import Case +from celery.tests.case import AppCase, patch, restore_logging class TRef(object): @@ -23,16 +19,15 @@ class TRef(object): class MockTimer(object): installed = [] - def apply_interval(self, msecs, fun, *args, **kwargs): + def call_repeatedly(self, secs, fun, *args, **kwargs): self.installed.append(fun) return TRef() timer = MockTimer() -class test_Polaroid(Case): +class test_Polaroid(AppCase): - def setUp(self): - self.app = app_or_default() + def setup(self): self.state = self.app.events.State() def test_constructor(self): @@ -100,7 +95,7 @@ class test_Polaroid(Case): self.assertEqual(shutter_signal_sent[0], 1) -class test_evcam(Case): +class test_evcam(AppCase): class MockReceiver(object): raise_keyboard_interrupt = False @@ -114,25 +109,22 @@ class test_evcam(Case): def Receiver(self, *args, **kwargs): return test_evcam.MockReceiver() - def setUp(self): - self.app = app_or_default() - self.prev, self.app.events = self.app.events, self.MockEvents() + def setup(self): + self.app.events = self.MockEvents() self.app.events.app = self.app - def tearDown(self): - self.app.events = self.prev - def test_evcam(self): - evcam(Polaroid, timer=timer) - evcam(Polaroid, timer=timer, loglevel='CRITICAL') - self.MockReceiver.raise_keyboard_interrupt = True - try: - with self.assertRaises(SystemExit): - evcam(Polaroid, timer=timer) - finally: - self.MockReceiver.raise_keyboard_interrupt = False + with restore_logging(): + evcam(Polaroid, timer=timer, app=self.app) + evcam(Polaroid, timer=timer, loglevel='CRITICAL', app=self.app) + self.MockReceiver.raise_keyboard_interrupt = True + try: + with self.assertRaises(SystemExit): + evcam(Polaroid, timer=timer, app=self.app) + finally: + self.MockReceiver.raise_keyboard_interrupt = False @patch('celery.platforms.create_pidlock') def test_evcam_pidfile(self, create_pidlock): - evcam(Polaroid, timer=timer, pidfile='/var/pid') + evcam(Polaroid, timer=timer, pidfile='/var/pid', app=self.app) create_pidlock.assert_called_with('/var/pid') diff --git a/awx/lib/site-packages/celery/tests/events/test_state.py b/awx/lib/site-packages/celery/tests/events/test_state.py index 18bdcbfb78..a1b3c1a605 100644 --- a/awx/lib/site-packages/celery/tests/events/test_state.py +++ b/awx/lib/site-packages/celery/tests/events/test_state.py @@ -1,14 +1,23 @@ from __future__ import absolute_import -from time import time +import pickle +from random import shuffle +from time import time from itertools import count from celery import states from celery.events import Event -from celery.events.state import State, Worker, Task, HEARTBEAT_EXPIRE_WINDOW +from celery.events.state import ( + State, + Worker, + Task, + HEARTBEAT_EXPIRE_WINDOW, + HEARTBEAT_DRIFT_MAX, +) +from celery.five import range from celery.utils import uuid -from celery.tests.utils import Case +from celery.tests.case import AppCase, patch class replay(object): @@ -17,22 +26,29 @@ class replay(object): self.state = state self.rewind() self.setup() + self.current_clock = 0 def setup(self): pass + def next_event(self): + ev = self.events[next(self.position)] + ev['local_received'] = ev['timestamp'] + self.current_clock = ev.get('clock') or self.current_clock + 1 + return ev + def 
__iter__(self): return self def __next__(self): try: - self.state.event(self.events[self.position()]) + self.state.event(self.next_event()) except IndexError: raise StopIteration() next = __next__ def rewind(self): - self.position = count(0).next + self.position = count(0) return self def play(self): @@ -78,6 +94,48 @@ class ev_task_states(replay): ] +def QTEV(type, uuid, hostname, clock, timestamp=None): + """Quick task event.""" + return Event('task-{0}'.format(type), uuid=uuid, hostname=hostname, + clock=clock, timestamp=timestamp or time()) + + +class ev_logical_clock_ordering(replay): + + def __init__(self, state, offset=0, uids=None): + self.offset = offset or 0 + self.uids = self.setuids(uids) + super(ev_logical_clock_ordering, self).__init__(state) + + def setuids(self, uids): + uids = self.tA, self.tB, self.tC = uids or [uuid(), uuid(), uuid()] + return uids + + def setup(self): + offset = self.offset + tA, tB, tC = self.uids + self.events = [ + QTEV('received', tA, 'w1', clock=offset + 1), + QTEV('received', tB, 'w2', clock=offset + 1), + QTEV('started', tA, 'w1', clock=offset + 3), + QTEV('received', tC, 'w2', clock=offset + 3), + QTEV('started', tB, 'w2', clock=offset + 5), + QTEV('retried', tA, 'w1', clock=offset + 7), + QTEV('succeeded', tB, 'w2', clock=offset + 9), + QTEV('started', tC, 'w2', clock=offset + 10), + QTEV('received', tA, 'w3', clock=offset + 13), + QTEV('succeeded', tC, 'w2', clock=offset + 12), + QTEV('started', tA, 'w3', clock=offset + 14), + QTEV('succeeded', tA, 'w3', clock=offset + 16), + ] + + def rewind_with_offset(self, offset, uids=None): + self.offset = offset + self.uids = self.setuids(uids or self.uids) + self.setup() + self.rewind() + + class ev_snapshot(replay): def setup(self): @@ -93,7 +151,22 @@ class ev_snapshot(replay): uuid=uuid(), hostname=worker)) -class test_Worker(Case): +class test_Worker(AppCase): + + def test_equality(self): + self.assertEqual(Worker(hostname='foo').hostname, 'foo') + self.assertEqual( + Worker(hostname='foo'), Worker(hostname='foo'), + ) + self.assertNotEqual( + Worker(hostname='foo'), Worker(hostname='bar'), + ) + self.assertEqual( + hash(Worker(hostname='foo')), hash(Worker(hostname='foo')), + ) + self.assertNotEqual( + hash(Worker(hostname='foo')), hash(Worker(hostname='bar')), + ) def test_survives_missing_timestamp(self): worker = Worker(hostname='foo') @@ -103,8 +176,37 @@ class test_Worker(Case): def test_repr(self): self.assertTrue(repr(Worker(hostname='foo'))) + def test_drift_warning(self): + worker = Worker(hostname='foo') + with patch('celery.events.state.warn') as warn: + worker.update_heartbeat(time(), time() + (HEARTBEAT_DRIFT_MAX * 2)) + self.assertTrue(warn.called) + self.assertIn('Substantial drift', warn.call_args[0][0]) -class test_Task(Case): + def test_update_heartbeat(self): + worker = Worker(hostname='foo') + worker.update_heartbeat(time(), time()) + self.assertEqual(len(worker.heartbeats), 1) + worker.update_heartbeat(time() - 10, time()) + self.assertEqual(len(worker.heartbeats), 1) + + +class test_Task(AppCase): + + def test_equality(self): + self.assertEqual(Task(uuid='foo').uuid, 'foo') + self.assertEqual( + Task(uuid='foo'), Task(uuid='foo'), + ) + self.assertNotEqual( + Task(uuid='foo'), Task(uuid='bar'), + ) + self.assertEqual( + hash(Task(uuid='foo')), hash(Task(uuid='foo')), + ) + self.assertNotEqual( + hash(Task(uuid='foo')), hash(Task(uuid='bar')), + ) def test_info(self): task = Task(uuid='abcdefg', @@ -116,6 +218,7 @@ class test_Task(Case): eta=1, runtime=0.0001, expires=1, +
foo=None, exception=1, received=time() - 10, started=time() - 8, @@ -130,6 +233,7 @@ class test_Task(Case): self.assertEqual(sorted(['args', 'kwargs']), sorted(task.info(['args', 'kwargs']).keys())) + self.assertFalse(list(task.info('foo'))) def test_ready(self): task = Task(uuid='abcdefg', @@ -160,14 +264,36 @@ class test_Task(Case): self.assertTrue(repr(Task(uuid='xxx', name='tasks.add'))) -class test_State(Case): +class test_State(AppCase): def test_repr(self): self.assertTrue(repr(State())) + def test_pickleable(self): + self.assertTrue(pickle.loads(pickle.dumps(State()))) + + def test_task_logical_clock_ordering(self): + state = State() + r = ev_logical_clock_ordering(state) + tA, tB, tC = r.uids + r.play() + now = list(state.tasks_by_time()) + self.assertEqual(now[0][0], tA) + self.assertEqual(now[1][0], tC) + self.assertEqual(now[2][0], tB) + for _ in range(1000): + shuffle(r.uids) + tA, tB, tC = r.uids + r.rewind_with_offset(r.current_clock + 1, r.uids) + r.play() + now = list(state.tasks_by_time()) + self.assertEqual(now[0][0], tA) + self.assertEqual(now[1][0], tC) + self.assertEqual(now[2][0], tB) + def test_worker_online_offline(self): r = ev_worker_online_offline(State()) - r.next() + next(r) self.assertTrue(r.state.alive_workers()) self.assertTrue(r.state.workers['utest1'].alive) r.play() @@ -181,7 +307,7 @@ class test_State(Case): def test_worker_heartbeat_expire(self): r = ev_worker_heartbeats(State()) - r.next() + next(r) self.assertFalse(r.state.alive_workers()) self.assertFalse(r.state.workers['utest1'].alive) r.play() @@ -192,7 +318,7 @@ class test_State(Case): r = ev_task_states(State()) # RECEIVED - r.next() + next(r) self.assertTrue(r.tid in r.state.tasks) task = r.state.tasks[r.tid] self.assertEqual(task.state, states.RECEIVED) @@ -201,7 +327,7 @@ class test_State(Case): self.assertEqual(task.worker.hostname, 'utest1') # STARTED - r.next() + next(r) self.assertTrue(r.state.workers['utest1'].alive, 'any task event adds worker heartbeat') self.assertEqual(task.state, states.STARTED) @@ -210,14 +336,14 @@ class test_State(Case): self.assertEqual(task.worker.hostname, 'utest1') # REVOKED - r.next() + next(r) self.assertEqual(task.state, states.REVOKED) self.assertTrue(task.revoked) self.assertEqual(task.timestamp, task.revoked) self.assertEqual(task.worker.hostname, 'utest1') # RETRY - r.next() + next(r) self.assertEqual(task.state, states.RETRY) self.assertTrue(task.retried) self.assertEqual(task.timestamp, task.retried) @@ -226,7 +352,7 @@ class test_State(Case): self.assertEqual(task.traceback, 'line 2 at main') # FAILURE - r.next() + next(r) self.assertEqual(task.state, states.FAILURE) self.assertTrue(task.failed) self.assertEqual(task.timestamp, task.failed) @@ -235,7 +361,7 @@ class test_State(Case): self.assertEqual(task.traceback, 'line 1 at main') # SUCCESS - r.next() + next(r) self.assertEqual(task.state, states.SUCCESS) self.assertTrue(task.succeeded) self.assertEqual(task.timestamp, task.succeeded) @@ -305,13 +431,13 @@ class test_State(Case): def test_tasks_by_timestamp(self): r = ev_snapshot(State()) r.play() - self.assertEqual(len(r.state.tasks_by_timestamp()), 20) + self.assertEqual(len(list(r.state.tasks_by_timestamp())), 20) def test_tasks_by_type(self): r = ev_snapshot(State()) r.play() - self.assertEqual(len(r.state.tasks_by_type('task1')), 10) - self.assertEqual(len(r.state.tasks_by_type('task2')), 10) + self.assertEqual(len(list(r.state.tasks_by_type('task1'))), 10) + self.assertEqual(len(list(r.state.tasks_by_type('task2'))), 10) def 
test_alive_workers(self): r = ev_snapshot(State()) @@ -321,8 +447,8 @@ class test_State(Case): def test_tasks_by_worker(self): r = ev_snapshot(State()) r.play() - self.assertEqual(len(r.state.tasks_by_worker('utest1')), 10) - self.assertEqual(len(r.state.tasks_by_worker('utest2')), 10) + self.assertEqual(len(list(r.state.tasks_by_worker('utest1'))), 10) + self.assertEqual(len(list(r.state.tasks_by_worker('utest2'))), 10) def test_survives_unknown_worker_event(self): s = State() @@ -336,6 +462,29 @@ class test_State(Case): 'uuid': 'x', 'hostname': 'y'}) + def test_limits_maxtasks(self): + s = State() + s.max_tasks_in_memory = 1 + s.task_event('task-unknown-event-xxx', {'foo': 'bar', + 'uuid': 'x', + 'hostname': 'y', + 'clock': 3}) + s.task_event('task-unknown-event-xxx', {'foo': 'bar', + 'uuid': 'y', + 'hostname': 'y', + 'clock': 4}) + + s.task_event('task-unknown-event-xxx', {'foo': 'bar', + 'uuid': 'z', + 'hostname': 'y', + 'clock': 5}) + self.assertEqual(len(s._taskheap), 2) + self.assertEqual(s._taskheap[0].clock, 4) + self.assertEqual(s._taskheap[1].clock, 5) + + s._taskheap.append(s._taskheap[0]) + self.assertTrue(list(s.tasks_by_time())) + def test_callback(self): scratch = {} diff --git a/awx/lib/site-packages/kombu/tests/utilities/__init__.py b/awx/lib/site-packages/celery/tests/fixups/__init__.py similarity index 100% rename from awx/lib/site-packages/kombu/tests/utilities/__init__.py rename to awx/lib/site-packages/celery/tests/fixups/__init__.py diff --git a/awx/lib/site-packages/celery/tests/fixups/test_django.py b/awx/lib/site-packages/celery/tests/fixups/test_django.py new file mode 100644 index 0000000000..1a995fe076 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/fixups/test_django.py @@ -0,0 +1,277 @@ +from __future__ import absolute_import + +import os + +from contextlib import contextmanager + +from celery.fixups.django import ( + _maybe_close_fd, + fixup, + DjangoFixup, +) + +from celery.tests.case import ( + AppCase, Mock, patch, patch_many, patch_modules, mask_modules, +) + + +class test_DjangoFixup(AppCase): + + def test_fixup(self): + with patch('celery.fixups.django.DjangoFixup') as Fixup: + with patch.dict(os.environ, DJANGO_SETTINGS_MODULE=''): + fixup(self.app) + self.assertFalse(Fixup.called) + with patch.dict(os.environ, DJANGO_SETTINGS_MODULE='settings'): + with mask_modules('django'): + with self.assertWarnsRegex(UserWarning, 'but Django is'): + fixup(self.app) + self.assertFalse(Fixup.called) + with patch_modules('django'): + fixup(self.app) + self.assertTrue(Fixup.called) + + @contextmanager + def fixup_context(self, app): + with patch('celery.fixups.django.import_module') as import_module: + with patch('celery.fixups.django.symbol_by_name') as symbyname: + f = DjangoFixup(app) + yield f, import_module, symbyname + + def test_maybe_close_fd(self): + with patch('os.close'): + _maybe_close_fd(Mock()) + _maybe_close_fd(object()) + + def test_init(self): + with self.fixup_context(self.app) as (f, importmod, sym): + self.assertTrue(f) + + def se(name): + if name == 'django.utils.timezone:now': + raise ImportError() + return Mock() + sym.side_effect = se + self.assertTrue(DjangoFixup(self.app)._now) + + def se2(name): + if name == 'django.db:close_old_connections': + raise ImportError() + return Mock() + sym.side_effect = se2 + self.assertIsNone(DjangoFixup(self.app)._close_old_connections) + + def test_install(self): + self.app.conf = {'CELERY_DB_REUSE_MAX': None} + self.app.loader = Mock() + with self.fixup_context(self.app) as (f, _, _): + with 
patch_many('os.getcwd', 'sys.path', + 'celery.fixups.django.signals') as (cw, p, sigs): + cw.return_value = '/opt/vandelay' + f.install() + sigs.beat_embedded_init.connect.assert_called_with( + f.close_database, + ) + sigs.worker_ready.connect.assert_called_with(f.on_worker_ready) + sigs.task_prerun.connect.assert_called_with(f.on_task_prerun) + sigs.task_postrun.connect.assert_called_with(f.on_task_postrun) + sigs.worker_init.connect.assert_called_with(f.on_worker_init) + sigs.worker_process_init.connect.assert_called_with( + f.on_worker_process_init, + ) + self.assertEqual(self.app.loader.now, f.now) + self.assertEqual(self.app.loader.mail_admins, f.mail_admins) + p.append.assert_called_with('/opt/vandelay') + + def test_now(self): + with self.fixup_context(self.app) as (f, _, _): + self.assertTrue(f.now(utc=True)) + self.assertFalse(f._now.called) + self.assertTrue(f.now(utc=False)) + self.assertTrue(f._now.called) + + def test_mail_admins(self): + with self.fixup_context(self.app) as (f, _, _): + f.mail_admins('sub', 'body', True) + f._mail_admins.assert_called_with( + 'sub', 'body', fail_silently=True, + ) + + def test_on_worker_init(self): + with self.fixup_context(self.app) as (f, _, _): + f.close_database = Mock() + f.close_cache = Mock() + f.on_worker_init() + f.close_database.assert_called_with() + f.close_cache.assert_called_with() + + def test_on_worker_process_init(self): + with self.fixup_context(self.app) as (f, _, _): + with patch('celery.fixups.django._maybe_close_fd') as mcf: + _all = f._db.connections.all = Mock() + conns = _all.return_value = [ + Mock(), Mock(), + ] + conns[0].connection = None + with patch.object(f, 'close_cache'): + with patch.object(f, '_close_database'): + f.on_worker_process_init() + mcf.assert_called_with(conns[1].connection) + f.close_cache.assert_called_with() + f._close_database.assert_called_with() + + mcf.reset_mock() + _all.side_effect = AttributeError() + f.on_worker_process_init() + mcf.assert_called_with(f._db.connection.connection) + f._db.connection = None + f.on_worker_process_init() + + def test_on_task_prerun(self): + task = Mock() + with self.fixup_context(self.app) as (f, _, _): + task.request.is_eager = False + with patch.object(f, 'close_database'): + f.on_task_prerun(task) + f.close_database.assert_called_with() + + task.request.is_eager = True + with patch.object(f, 'close_database'): + f.on_task_prerun(task) + self.assertFalse(f.close_database.called) + + def test_on_task_postrun(self): + task = Mock() + with self.fixup_context(self.app) as (f, _, _): + with patch.object(f, 'close_cache'): + task.request.is_eager = False + with patch.object(f, 'close_database'): + f.on_task_postrun(task) + self.assertTrue(f.close_database.called) + self.assertTrue(f.close_cache.called) + + # when a task is eager, do not close connections + with patch.object(f, 'close_cache'): + task.request.is_eager = True + with patch.object(f, 'close_database'): + f.on_task_postrun(task) + self.assertFalse(f.close_database.called) + self.assertFalse(f.close_cache.called) + + def test_close_database(self): + with self.fixup_context(self.app) as (f, _, _): + f._close_old_connections = Mock() + f.close_database() + f._close_old_connections.assert_called_with() + f._close_old_connections = None + with patch.object(f, '_close_database') as _close: + f.db_reuse_max = None + f.close_database() + _close.assert_called_with() + _close.reset_mock() + + f.db_reuse_max = 10 + f._db_recycles = 3 + f.close_database() + self.assertFalse(_close.called) + 
self.assertEqual(f._db_recycles, 4) + _close.reset_mock() + + f._db_recycles = 20 + f.close_database() + _close.assert_called_with() + self.assertEqual(f._db_recycles, 1) + + def test__close_database(self): + with self.fixup_context(self.app) as (f, _, _): + conns = f._db.connections = [Mock(), Mock(), Mock()] + conns[1].close.side_effect = KeyError('already closed') + f.database_errors = (KeyError, ) + + f._close_database() + conns[0].close.assert_called_with() + conns[1].close.assert_called_with() + conns[2].close.assert_called_with() + + conns[1].close.side_effect = KeyError('omg') + with self.assertRaises(KeyError): + f._close_database() + + class Object(object): + pass + o = Object() + o.close_connection = Mock() + f._db = o + f._close_database() + o.close_connection.assert_called_with() + + def test_close_cache(self): + with self.fixup_context(self.app) as (f, _, _): + f.close_cache() + f._cache.cache.close.assert_called_with() + f._cache.cache.close.side_effect = TypeError() + f.close_cache() + + def test_on_worker_ready(self): + with self.fixup_context(self.app) as (f, _, _): + f._settings.DEBUG = False + f.on_worker_ready() + with self.assertWarnsRegex(UserWarning, r'leads to a memory leak'): + f._settings.DEBUG = True + f.on_worker_ready() + + def test_mysql_errors(self): + with patch_modules('MySQLdb'): + import MySQLdb as mod + mod.DatabaseError = Mock() + mod.InterfaceError = Mock() + mod.OperationalError = Mock() + with self.fixup_context(self.app) as (f, _, _): + self.assertIn(mod.DatabaseError, f.database_errors) + self.assertIn(mod.InterfaceError, f.database_errors) + self.assertIn(mod.OperationalError, f.database_errors) + with mask_modules('MySQLdb'): + with self.fixup_context(self.app): + pass + + def test_pg_errors(self): + with patch_modules('psycopg2'): + import psycopg2 as mod + mod.DatabaseError = Mock() + mod.InterfaceError = Mock() + mod.OperationalError = Mock() + with self.fixup_context(self.app) as (f, _, _): + self.assertIn(mod.DatabaseError, f.database_errors) + self.assertIn(mod.InterfaceError, f.database_errors) + self.assertIn(mod.OperationalError, f.database_errors) + with mask_modules('psycopg2'): + with self.fixup_context(self.app): + pass + + def test_sqlite_errors(self): + with patch_modules('sqlite3'): + import sqlite3 as mod + mod.DatabaseError = Mock() + mod.InterfaceError = Mock() + mod.OperationalError = Mock() + with self.fixup_context(self.app) as (f, _, _): + self.assertIn(mod.DatabaseError, f.database_errors) + self.assertIn(mod.InterfaceError, f.database_errors) + self.assertIn(mod.OperationalError, f.database_errors) + with mask_modules('sqlite3'): + with self.fixup_context(self.app): + pass + + def test_oracle_errors(self): + with patch_modules('cx_Oracle'): + import cx_Oracle as mod + mod.DatabaseError = Mock() + mod.InterfaceError = Mock() + mod.OperationalError = Mock() + with self.fixup_context(self.app) as (f, _, _): + self.assertIn(mod.DatabaseError, f.database_errors) + self.assertIn(mod.InterfaceError, f.database_errors) + self.assertIn(mod.OperationalError, f.database_errors) + with mask_modules('cx_Oracle'): + with self.fixup_context(self.app): + pass diff --git a/awx/lib/site-packages/celery/tests/functional/case.py b/awx/lib/site-packages/celery/tests/functional/case.py index a11fbc5413..298c684666 100644 --- a/awx/lib/site-packages/celery/tests/functional/case.py +++ b/awx/lib/site-packages/celery/tests/functional/case.py @@ -11,11 +11,12 @@ import traceback from itertools import count from time import time +from celery 
import current_app from celery.exceptions import TimeoutError -from celery.task.control import ping, flatten_reply, inspect +from celery.app.control import flatten_reply from celery.utils.imports import qualname -from celery.tests.utils import Case +from celery.tests.case import Case HOSTNAME = socket.gethostname() @@ -36,12 +37,13 @@ def try_while(fun, reason='Timed out', timeout=10, interval=0.5): class Worker(object): started = False - next_worker_id = count(1).next + worker_ids = count(1) _shutdown_called = False - def __init__(self, hostname, loglevel='error'): + def __init__(self, hostname, loglevel='error', app=None): self.hostname = hostname self.loglevel = loglevel + self.app = app or current_app._get_current_object() def start(self): if not self.started: @@ -51,16 +53,17 @@ class Worker(object): def _fork_and_exec(self): pid = os.fork() if pid == 0: - from celery import current_app - current_app.worker_main(['celeryd', '--loglevel=INFO', - '-n', self.hostname, - '-P', 'solo']) + self.app.worker_main(['worker', '--loglevel=INFO', + '-n', self.hostname, + '-P', 'solo']) os._exit(0) self.pid = pid + def ping(self, *args, **kwargs): + return self.app.control.ping(*args, **kwargs) + def is_alive(self, timeout=1): - r = ping(destination=[self.hostname], - timeout=timeout) + r = self.ping(destination=[self.hostname], timeout=timeout) return self.hostname in flatten_reply(r) def wait_until_started(self, timeout=10, interval=0.5): @@ -91,7 +94,7 @@ class Worker(object): if caller: hostname = '.'.join([qualname(caller), hostname]) else: - hostname += str(cls.next_worker_id()) + hostname += str(next(cls.worker_ids)) worker = cls(hostname) worker.ensure_started() stack = traceback.format_stack() @@ -124,7 +127,8 @@ class WorkerCase(Case): self.assertTrue(self.worker.is_alive) def inspect(self, timeout=1): - return inspect([self.worker.hostname], timeout=timeout) + return self.app.control.inspect([self.worker.hostname], + timeout=timeout) def my_response(self, response): return flatten_reply(response)[self.worker.hostname] diff --git a/awx/lib/site-packages/celery/tests/functional/tasks.py b/awx/lib/site-packages/celery/tests/functional/tasks.py index b094667057..85479b47be 100644 --- a/awx/lib/site-packages/celery/tests/functional/tasks.py +++ b/awx/lib/site-packages/celery/tests/functional/tasks.py @@ -2,7 +2,7 @@ from __future__ import absolute_import import time -from celery import task, subtask +from celery import task, signature @task() @@ -14,7 +14,7 @@ def add_cb(x, y, callback=None): result = x + y if callback: - return subtask(callback).apply_async(result) + return signature(callback).apply_async(result) return result diff --git a/awx/lib/site-packages/celery/tests/security/__init__.py b/awx/lib/site-packages/celery/tests/security/__init__.py index 6aa1c2760b..50b7f4ca54 100644 --- a/awx/lib/site-packages/celery/tests/security/__init__.py +++ b/awx/lib/site-packages/celery/tests/security/__init__.py @@ -1,19 +1,24 @@ from __future__ import absolute_import +""" +Keys and certificates for tests (KEY1 is a private key of CERT1, etc.)
+Generated with `extra/security/get-cert.sh` + +""" KEY1 = """-----BEGIN RSA PRIVATE KEY----- -MIICXgIBAAKBgQDCsmLC+eqL4z6bhtv0nzbcnNXuQrZUoh827jGfDI3kxNZ2LbEy -kJOn7GIl2tPpcY2Dm1sOM8G1XLm/8Izprp4ifpF4Gi0mqz0GquY5dcMNASG9zkRO -J1z8dQUyp3PIUHdQdrKbYQVifkA4dh6Kg27k8/IcdY1lHsaIju4bX7MADwIDAQAB -AoGBAKWpCRWjdiluwu+skO0Up6aRIAop42AhzfN8OuZ81SMJRP2rJTHECI8COATD -rDneb63Ce3ibG0BI1Jf3gr624D806xVqK/SVHZNbfWx0daE3Q43DDk1UdhRF5+0X -HPqqU/IdeW1YGyWJi+IhMTXyGqhZ1BTN+4vHL7NlRpDt6JOpAkEA+xvfRO4Ca7Lw -NEgvW7n+/L9b+xygQBtOA5s260pO+8jMrXvOdCjISaKHD8HZGFN9oUmLsDXXBhjh -j0WCMdsHbQJBAMZ9OIw6M/Uxv5ANPCD58p6PZTb0knXVPMYBFQ7Y/h2HZzqbEyiI -DLGZpAa9/IhVkoCULd/TNytz5rl27KEni+sCQArFQEdZmhr6etkTO4zIpoo6vvw/ -VxRI14jKEIn5Dvg3vae3RryuvyCBax+e5evoMNxJJkexl354dLxLc/ElfuUCQQCq -U14pBvD7ITuPM6w7aAEIi2iBZhIgR2GlT9xwJ0i4si6lHdms2EJ8TKlyl6mSnEvh -RkavYSJgiU6eLC0WhUcNAkEA7vuNcz/uuckmq870qfSzUQJIYLzwVOadEdEEAVy0 -L0usztlKmAH8U/ceQMMJLMI9W4m680JrMf3iS7f+SkgUTA== +MIICXQIBAAKBgQC9Twh0V5q/R1Q8N+Y+CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5 +dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45//IWz6/BdPFWaPm0rtYbcxZHqXDZScRp +vFDLHhMysdqQWHxXVxpqIXXo4B7bnfnGvXhYwYITeEyQylV/rnH53mdV8wIDAQAB +AoGBAKUJN4elr+S9nHP7D6BZNTsJ0Q6eTd0ftfrmx+jVMG8Oh3jh6ZSkG0R5e6iX +0W7I4pgrUWRyWDB98yJy1o+90CAN/D80o8SbmW/zfA2WLBteOujMfCEjNrc/Nodf +6MZ0QQ6PnPH6pp94i3kNmFD8Mlzm+ODrUjPF0dCNf474qeKhAkEA7SXj5cQPyQXM +s15oGX5eb6VOk96eAPtEC72cLSh6o+VYmXyGroV1A2JPm6IzH87mTqjWXG229hjt +XVvDbdY2uQJBAMxblWFaWJhhU6Y1euazaBl/OyLYlqNz4LZ0RzCulEoV/gMGYU32 +PbilD5fpFsyhp5oCxnWNEsUFovYMKjKM3AsCQQCIlOcBoP76ZxWzRK8t56MaKBnu +fiuAIzbYkDbPp12i4Wc61wZ2ozR2Y3u4Bh3tturb6M+04hea+1ZSC5StwM85AkAp +UPLYpe13kWXaGsHoVqlbTk/kcamzDkCGYufpvcIZYGzkq6uMmZZM+II4klWbtasv +BhSdu5Hp54PU/wyg/72VAkBy1/oM3/QJ35Vb6TByHBLFR4nOuORoRclmxcoCPva9 +xqkQQn+UgBtOemRXpFCuKaoXonA3nLeB54SWcC6YUOcR -----END RSA PRIVATE KEY-----""" KEY2 = """-----BEGIN RSA PRIVATE KEY----- @@ -33,17 +38,19 @@ Fxeq/HOp9JYw4gRu6Ycvqu57KHwpHhR0FCXRBxuYcJ5V -----END RSA PRIVATE KEY-----""" CERT1 = """-----BEGIN CERTIFICATE----- -MIICATCCAWoCCQCR6B3XQcBOvjANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB -VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 -cyBQdHkgTHRkMB4XDTExMDcxOTA5MDgyMloXDTEyMDcxODA5MDgyMlowRTELMAkG -A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0 -IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwrJi -wvnqi+M+m4bb9J823JzV7kK2VKIfNu4xnwyN5MTWdi2xMpCTp+xiJdrT6XGNg5tb -DjPBtVy5v/CM6a6eIn6ReBotJqs9BqrmOXXDDQEhvc5ETidc/HUFMqdzyFB3UHay -m2EFYn5AOHYeioNu5PPyHHWNZR7GiI7uG1+zAA8CAwEAATANBgkqhkiG9w0BAQUF -AAOBgQA4+OiJ+pyq9lbEMFYC9K2+e77noHJkwUOs4wO6p1R14ZqSmoIszQ7KEBiH -2HHPMUY6kt4GL1aX4Vr1pUlXXdH5WaEk0fvDYZemILDMqIQJ9ettx8KihZjFGC4k -Y4Sy5xmqdE9Kjjd854gTRRnzpMnJp6+74Ki2X8GHxn3YBM+9Ng== +MIICVzCCAcACCQC72PP7b7H9BTANBgkqhkiG9w0BAQUFADBwMQswCQYDVQQGEwJV +UzELMAkGA1UECBMCQ0ExCzAJBgNVBAcTAlNGMQ8wDQYDVQQKEwZDZWxlcnkxDzAN +BgNVBAMTBkNFbGVyeTElMCMGCSqGSIb3DQEJARYWY2VydEBjZWxlcnlwcm9qZWN0 +Lm9yZzAeFw0xMzA3MjQxMjExMTRaFw0xNDA3MjQxMjExMTRaMHAxCzAJBgNVBAYT +AlVTMQswCQYDVQQIEwJDQTELMAkGA1UEBxMCU0YxDzANBgNVBAoTBkNlbGVyeTEP +MA0GA1UEAxMGQ0VsZXJ5MSUwIwYJKoZIhvcNAQkBFhZjZXJ0QGNlbGVyeXByb2pl +Y3Qub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC9Twh0V5q/R1Q8N+Y+ +CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45 +//IWz6/BdPFWaPm0rtYbcxZHqXDZScRpvFDLHhMysdqQWHxXVxpqIXXo4B7bnfnG +vXhYwYITeEyQylV/rnH53mdV8wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAKA4tD3J +94tsnQxFxHP7Frt7IvGMH+3wMqOiXFgYxPJX2tyaPvOLJ/7ERE4MkrvZO7IRC0iA +yKBe0pucdrTgsJoDV8juahuyjXOjvU14+q7Wv7pj7zqddVavzK8STLX4/FMIDnbK +aMGJl7wyj6V2yy6ANSbmy0uQjHikI6DrZEoK -----END 
CERTIFICATE-----""" CERT2 = """-----BEGIN CERTIFICATE----- diff --git a/awx/lib/site-packages/celery/tests/security/case.py b/awx/lib/site-packages/celery/tests/security/case.py index 5d2828a794..4440f4963a 100644 --- a/awx/lib/site-packages/celery/tests/security/case.py +++ b/awx/lib/site-packages/celery/tests/security/case.py @@ -1,13 +1,11 @@ from __future__ import absolute_import -from nose import SkipTest - -from celery.tests.utils import Case +from celery.tests.case import AppCase, SkipTest -class SecurityCase(Case): +class SecurityCase(AppCase): - def setUp(self): + def setup(self): try: from OpenSSL import crypto # noqa except ImportError: diff --git a/awx/lib/site-packages/celery/tests/security/test_certificate.py b/awx/lib/site-packages/celery/tests/security/test_certificate.py index 5708ff1b49..84b7d8a552 100644 --- a/awx/lib/site-packages/celery/tests/security/test_certificate.py +++ b/awx/lib/site-packages/celery/tests/security/test_certificate.py @@ -1,15 +1,12 @@ from __future__ import absolute_import -from __future__ import with_statement from celery.exceptions import SecurityError from celery.security.certificate import Certificate, CertStore, FSCertStore -from mock import Mock, patch - from . import CERT1, CERT2, KEY1 from .case import SecurityCase -from celery.tests.utils import mock_open +from celery.tests.case import Mock, mock_open, patch class test_Certificate(SecurityCase): @@ -26,7 +23,7 @@ class test_Certificate(SecurityCase): self.assertRaises(SecurityError, Certificate, KEY1) def test_has_expired(self): - self.assertTrue(Certificate(CERT1).has_expired()) + self.assertFalse(Certificate(CERT1).has_expired()) class test_CertStore(SecurityCase): diff --git a/awx/lib/site-packages/celery/tests/security/test_security.py b/awx/lib/site-packages/celery/tests/security/test_security.py index afe840c4d2..227c65a5db 100644 --- a/awx/lib/site-packages/celery/tests/security/test_security.py +++ b/awx/lib/site-packages/celery/tests/security/test_security.py @@ -15,55 +15,55 @@ Generated with: """ from __future__ import absolute_import -from __future__ import with_statement -import __builtin__ +from kombu.serialization import disable_insecure_serializers -from mock import Mock, patch - -from celery import current_app -from celery.exceptions import ImproperlyConfigured -from celery.security import setup_security, disable_untrusted_serializers +from celery.exceptions import ImproperlyConfigured, SecurityError +from celery.five import builtins +from celery.security.utils import reraise_errors from kombu.serialization import registry from .case import SecurityCase -from celery.tests.utils import mock_open +from celery.tests.case import Mock, mock_open, patch class test_security(SecurityCase): - def tearDown(self): + def teardown(self): registry._disabled_content_types.clear() - def test_disable_untrusted_serializers(self): - disabled = registry._disabled_content_types - self.assertEqual(0, len(disabled)) + def test_disable_insecure_serializers(self): + try: + disabled = registry._disabled_content_types + self.assertTrue(disabled) - disable_untrusted_serializers( - ['application/json', 'application/x-python-serialize']) - self.assertIn('application/x-yaml', disabled) - self.assertNotIn('application/json', disabled) - self.assertNotIn('application/x-python-serialize', disabled) - disabled.clear() + disable_insecure_serializers( + ['application/json', 'application/x-python-serialize'], + ) + self.assertIn('application/x-yaml', disabled) + self.assertNotIn('application/json', disabled) 
+ self.assertNotIn('application/x-python-serialize', disabled) + disabled.clear() - disable_untrusted_serializers() - self.assertIn('application/x-yaml', disabled) - self.assertIn('application/json', disabled) - self.assertIn('application/x-python-serialize', disabled) + disable_insecure_serializers(allowed=None) + self.assertIn('application/x-yaml', disabled) + self.assertIn('application/json', disabled) + self.assertIn('application/x-python-serialize', disabled) + finally: + disable_insecure_serializers(allowed=['json']) def test_setup_security(self): disabled = registry._disabled_content_types self.assertEqual(0, len(disabled)) - current_app.conf.CELERY_TASK_SERIALIZER = 'json' - - setup_security() + self.app.conf.CELERY_TASK_SERIALIZER = 'json' + self.app.setup_security() self.assertIn('application/x-python-serialize', disabled) disabled.clear() @patch('celery.security.register_auth') - @patch('celery.security.disable_untrusted_serializers') + @patch('celery.security._disable_insecure_serializers') def test_setup_registry_complete(self, dis, reg, key='KEY', cert='CERT'): calls = [0] @@ -75,24 +75,36 @@ class test_security(SecurityCase): finally: calls[0] += 1 + self.app.conf.CELERY_TASK_SERIALIZER = 'auth' with mock_open(side_effect=effect): - store = Mock() - setup_security(['json'], key, cert, store) - dis.assert_called_with(['json']) - reg.assert_called_with('A', 'B', store) + with patch('celery.security.registry') as registry: + store = Mock() + self.app.setup_security(['json'], key, cert, store) + dis.assert_called_with(['json']) + reg.assert_called_with('A', 'B', store, 'sha1', 'json') + registry._set_default_serializer.assert_called_with('auth') def test_security_conf(self): - current_app.conf.CELERY_TASK_SERIALIZER = 'auth' + self.app.conf.CELERY_TASK_SERIALIZER = 'auth' + with self.assertRaises(ImproperlyConfigured): + self.app.setup_security() - self.assertRaises(ImproperlyConfigured, setup_security) - - _import = __builtin__.__import__ + _import = builtins.__import__ def import_hook(name, *args, **kwargs): if name == 'OpenSSL': raise ImportError return _import(name, *args, **kwargs) - __builtin__.__import__ = import_hook - self.assertRaises(ImproperlyConfigured, setup_security) - __builtin__.__import__ = _import + builtins.__import__ = import_hook + with self.assertRaises(ImproperlyConfigured): + self.app.setup_security() + builtins.__import__ = _import + + def test_reraise_errors(self): + with self.assertRaises(SecurityError): + with reraise_errors(errors=(KeyError, )): + raise KeyError('foo') + with self.assertRaises(KeyError): + with reraise_errors(errors=(ValueError, )): + raise KeyError('bar') diff --git a/awx/lib/site-packages/celery/tests/security/test_serialization.py b/awx/lib/site-packages/celery/tests/security/test_serialization.py index 3f0704d077..50bc4bfab4 100644 --- a/awx/lib/site-packages/celery/tests/security/test_serialization.py +++ b/awx/lib/site-packages/celery/tests/security/test_serialization.py @@ -1,11 +1,14 @@ from __future__ import absolute_import -from celery.exceptions import SecurityError +import os +import base64 +from kombu.serialization import registry + +from celery.exceptions import SecurityError from celery.security.serialization import SecureSerializer, register_auth from celery.security.certificate import Certificate, CertStore from celery.security.key import PrivateKey -from kombu.serialization import registry from . 
import CERT1, CERT2, KEY1, KEY2 from .case import SecurityCase @@ -53,3 +56,9 @@ class test_SecureSerializer(SecurityCase): def test_register_auth(self): register_auth(KEY1, CERT1, '') self.assertIn('application/data', registry._decoders) + + def test_lots_of_sign(self): + for i in range(1000): + rdata = base64.urlsafe_b64encode(os.urandom(265)) + s = self._get_s(KEY1, CERT1, [CERT1]) + self.assertEqual(s.deserialize(s.serialize(rdata)), rdata) diff --git a/awx/lib/site-packages/celery/tests/slow/test_buckets.py b/awx/lib/site-packages/celery/tests/slow/test_buckets.py deleted file mode 100644 index 22b7c8bcf9..0000000000 --- a/awx/lib/site-packages/celery/tests/slow/test_buckets.py +++ /dev/null @@ -1,346 +0,0 @@ -from __future__ import absolute_import -from __future__ import with_statement - -import sys -import time - -from functools import partial -from itertools import chain, izip -from Queue import Empty - -from mock import Mock, patch - -from celery.app.registry import TaskRegistry -from celery.task.base import Task -from celery.utils import timeutils -from celery.utils import uuid -from celery.worker import buckets - -from celery.tests.utils import Case, skip_if_environ, mock_context - -skip_if_disabled = partial(skip_if_environ('SKIP_RLIMITS')) - - -class MockJob(object): - - def __init__(self, id, name, args, kwargs): - self.id = id - self.name = name - self.args = args - self.kwargs = kwargs - - def __eq__(self, other): - if isinstance(other, self.__class__): - return bool(self.id == other.id - and self.name == other.name - and self.args == other.args - and self.kwargs == other.kwargs) - else: - return self == other - - def __repr__(self): - return '<MockJob: task:%s id:%s args:%s kwargs:%s' % ( self.assertEqual(repr(ok_res), '<AsyncResult: %s>' % ( self.task1['id'])) self.assertEqual(repr(ok2_res), '<AsyncResult: %s>' % ( @@ -187,32 +192,32 @@ class test_AsyncResult(AppCase): self.task3['id'])) pending_id = uuid() - pending_res = AsyncResult(pending_id) + pending_res = self.app.AsyncResult(pending_id) self.assertEqual(repr(pending_res), '<AsyncResult: %s>' % ( pending_id)) def test_hash(self): - self.assertEqual(hash(AsyncResult('x0w991')), - hash(AsyncResult('x0w991'))) - self.assertNotEqual(hash(AsyncResult('x0w991')), - hash(AsyncResult('x1w991'))) + self.assertEqual(hash(self.app.AsyncResult('x0w991')), + hash(self.app.AsyncResult('x0w991'))) + self.assertNotEqual(hash(self.app.AsyncResult('x0w991')), + hash(self.app.AsyncResult('x1w991'))) def test_get_traceback(self): - ok_res = AsyncResult(self.task1['id']) - nok_res = AsyncResult(self.task3['id']) - nok_res2 = AsyncResult(self.task4['id']) + ok_res = self.app.AsyncResult(self.task1['id']) + nok_res = self.app.AsyncResult(self.task3['id']) + nok_res2 = self.app.AsyncResult(self.task4['id']) self.assertFalse(ok_res.traceback) self.assertTrue(nok_res.traceback) self.assertTrue(nok_res2.traceback) - pending_res = AsyncResult(uuid()) + pending_res = self.app.AsyncResult(uuid()) self.assertFalse(pending_res.traceback) def test_get(self): - ok_res = AsyncResult(self.task1['id']) - ok2_res = AsyncResult(self.task2['id']) - nok_res = AsyncResult(self.task3['id']) - nok2_res = AsyncResult(self.task4['id']) + ok_res = self.app.AsyncResult(self.task1['id']) + ok2_res = self.app.AsyncResult(self.task2['id']) + nok_res = self.app.AsyncResult(self.task3['id']) + nok2_res = self.app.AsyncResult(self.task4['id']) self.assertEqual(ok_res.get(), 'the') self.assertEqual(ok2_res.get(), 'quick') @@ -223,41 +228,45 @@ class test_AsyncResult(AppCase): self.assertEqual(ok_res.info, 'the') def test_get_timeout(self): - res = AsyncResult(self.task4['id']) # has RETRY state +
res = self.app.AsyncResult(self.task4['id']) # has RETRY state with self.assertRaises(TimeoutError): - res.get(timeout=0.1) + res.get(timeout=0.001) - pending_res = AsyncResult(uuid()) - with self.assertRaises(TimeoutError): - pending_res.get(timeout=0.1) + pending_res = self.app.AsyncResult(uuid()) + with patch('celery.result.time') as _time: + with self.assertRaises(TimeoutError): + pending_res.get(timeout=0.001, interval=0.001) + _time.sleep.assert_called_with(0.001) - @skip_if_quick def test_get_timeout_longer(self): - res = AsyncResult(self.task4['id']) # has RETRY state - with self.assertRaises(TimeoutError): - res.get(timeout=1) + res = self.app.AsyncResult(self.task4['id']) # has RETRY state + with patch('celery.result.time') as _time: + with self.assertRaises(TimeoutError): + res.get(timeout=1, interval=1) + _time.sleep.assert_called_with(1) def test_ready(self): - oks = (AsyncResult(self.task1['id']), - AsyncResult(self.task2['id']), - AsyncResult(self.task3['id'])) + oks = (self.app.AsyncResult(self.task1['id']), + self.app.AsyncResult(self.task2['id']), + self.app.AsyncResult(self.task3['id'])) self.assertTrue(all(result.ready() for result in oks)) - self.assertFalse(AsyncResult(self.task4['id']).ready()) + self.assertFalse(self.app.AsyncResult(self.task4['id']).ready()) - self.assertFalse(AsyncResult(uuid()).ready()) + self.assertFalse(self.app.AsyncResult(uuid()).ready()) class test_ResultSet(AppCase): def test_resultset_repr(self): - self.assertTrue(repr(ResultSet(map(AsyncResult, ['1', '2', '3'])))) + self.assertTrue(repr(self.app.ResultSet( + [self.app.AsyncResult(t) for t in ['1', '2', '3']]))) def test_eq_other(self): - self.assertFalse(ResultSet([1, 3, 3]) == 1) - self.assertTrue(ResultSet([1]) == ResultSet([1])) + self.assertFalse(self.app.ResultSet([1, 3, 3]) == 1) + self.assertTrue(self.app.ResultSet([1]) == self.app.ResultSet([1])) def test_get(self): - x = ResultSet(map(AsyncResult, [1, 2, 3])) + x = self.app.ResultSet([self.app.AsyncResult(t) for t in [1, 2, 3]]) b = x.results[0].backend = Mock() b.supports_native_join = False x.join_native = Mock() @@ -269,25 +278,74 @@ class test_ResultSet(AppCase): self.assertTrue(x.join_native.called) def test_add(self): - x = ResultSet([1]) + x = self.app.ResultSet([1]) x.add(2) self.assertEqual(len(x), 2) x.add(2) self.assertEqual(len(x), 2) + @contextmanager + def dummy_copy(self): + with patch('celery.result.copy') as copy: + + def passt(arg): + return arg + copy.side_effect = passt + + yield + + def test_iterate_respects_subpolling_interval(self): + r1 = self.app.AsyncResult(uuid()) + r2 = self.app.AsyncResult(uuid()) + backend = r1.backend = r2.backend = Mock() + backend.subpolling_interval = 10 + + ready = r1.ready = r2.ready = Mock() + + def se(*args, **kwargs): + ready.side_effect = KeyError() + return False + ready.return_value = False + ready.side_effect = se + + x = self.app.ResultSet([r1, r2]) + with self.dummy_copy(): + with patch('celery.result.time') as _time: + with self.assertRaises(KeyError): + list(x.iterate()) + _time.sleep.assert_called_with(10) + + backend.subpolling_interval = 0 + with patch('celery.result.time') as _time: + with self.assertRaises(KeyError): + ready.return_value = False + ready.side_effect = se + list(x.iterate()) + self.assertFalse(_time.sleep.called) + + def test_times_out(self): + r1 = self.app.AsyncResult(uuid) + r1.ready = Mock() + r1.ready.return_value = False + x = self.app.ResultSet([r1]) + with self.dummy_copy(): + with patch('celery.result.time'): + with 
self.assertRaises(TimeoutError): + list(x.iterate(timeout=1)) + def test_add_discard(self): - x = ResultSet([]) - x.add(AsyncResult('1')) - self.assertIn(AsyncResult('1'), x.results) - x.discard(AsyncResult('1')) - x.discard(AsyncResult('1')) + x = self.app.ResultSet([]) + x.add(self.app.AsyncResult('1')) + self.assertIn(self.app.AsyncResult('1'), x.results) + x.discard(self.app.AsyncResult('1')) + x.discard(self.app.AsyncResult('1')) x.discard('1') - self.assertNotIn(AsyncResult('1'), x.results) + self.assertNotIn(self.app.AsyncResult('1'), x.results) - x.update([AsyncResult('2')]) + x.update([self.app.AsyncResult('2')]) def test_clear(self): - x = ResultSet([]) + x = self.app.ResultSet([]) r = x.results x.clear() self.assertIs(x.results, r) @@ -342,7 +400,7 @@ class test_TaskSetResult(AppCase): def setup(self): self.size = 10 - self.ts = TaskSetResult(uuid(), make_mock_group(self.size)) + self.ts = TaskSetResult(uuid(), make_mock_group(self.app, self.size)) def test_total(self): self.assertEqual(self.ts.total, self.size) @@ -367,7 +425,16 @@ class test_GroupResult(AppCase): def setup(self): self.size = 10 - self.ts = GroupResult(uuid(), make_mock_group(self.size)) + self.ts = self.app.GroupResult( + uuid(), make_mock_group(self.app, self.size), + ) + + @depends_on_current_app + def test_is_pickleable(self): + ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) + self.assertEqual(pickle.loads(pickle.dumps(ts)), ts) + ts2 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) + self.assertEqual(pickle.loads(pickle.dumps(ts2)), ts2) def test_len(self): self.assertEqual(len(self.ts), self.size) @@ -375,93 +442,134 @@ class test_GroupResult(AppCase): def test_eq_other(self): self.assertFalse(self.ts == 1) + @depends_on_current_app def test_reduce(self): - self.assertTrue(loads(dumps(self.ts))) + self.assertTrue(pickle.loads(pickle.dumps(self.ts))) def test_iterate_raises(self): - ar = MockAsyncResultFailure(uuid()) - ts = GroupResult(uuid(), [ar]) - it = iter(ts) + ar = MockAsyncResultFailure(uuid(), app=self.app) + ts = self.app.GroupResult(uuid(), [ar]) + it = ts.iterate() with self.assertRaises(KeyError): - it.next() + next(it) def test_forget(self): - subs = [MockAsyncResultSuccess(uuid()), - MockAsyncResultSuccess(uuid())] - ts = GroupResult(uuid(), subs) + subs = [MockAsyncResultSuccess(uuid(), app=self.app), + MockAsyncResultSuccess(uuid(), app=self.app)] + ts = self.app.GroupResult(uuid(), subs) ts.forget() for sub in subs: self.assertTrue(sub.forgotten) def test_getitem(self): - subs = [MockAsyncResultSuccess(uuid()), - MockAsyncResultSuccess(uuid())] - ts = GroupResult(uuid(), subs) + subs = [MockAsyncResultSuccess(uuid(), app=self.app), + MockAsyncResultSuccess(uuid(), app=self.app)] + ts = self.app.GroupResult(uuid(), subs) self.assertIs(ts[0], subs[0]) def test_save_restore(self): - subs = [MockAsyncResultSuccess(uuid()), - MockAsyncResultSuccess(uuid())] - ts = GroupResult(uuid(), subs) + subs = [MockAsyncResultSuccess(uuid(), app=self.app), + MockAsyncResultSuccess(uuid(), app=self.app)] + ts = self.app.GroupResult(uuid(), subs) ts.save() with self.assertRaises(AttributeError): ts.save(backend=object()) - self.assertEqual(GroupResult.restore(ts.id).subtasks, + self.assertEqual(self.app.GroupResult.restore(ts.id).subtasks, ts.subtasks) ts.delete() - self.assertIsNone(GroupResult.restore(ts.id)) + self.assertIsNone(self.app.GroupResult.restore(ts.id)) with self.assertRaises(AttributeError): - GroupResult.restore(ts.id, backend=object()) + 
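The test_save_restore hunk above exercises the persistent-group API. Roughly, under the same throwaway-app assumption as the earlier sketch:

from celery import Celery, uuid

app = Celery('sketch', backend='cache+memory://')

group = app.GroupResult(uuid(), [app.AsyncResult(uuid())])
group.save()                                 # store member ids in the backend
assert app.GroupResult.restore(group.id) == group
group.delete()                               # after this, restore() returns None
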
self.app.GroupResult.restore(ts.id, backend=object()) def test_join_native(self): backend = SimpleBackend() - subtasks = [AsyncResult(uuid(), backend=backend) + subtasks = [self.app.AsyncResult(uuid(), backend=backend) for i in range(10)] - ts = GroupResult(uuid(), subtasks) + ts = self.app.GroupResult(uuid(), subtasks) backend.ids = [subtask.id for subtask in subtasks] res = ts.join_native() - self.assertEqual(res, range(10)) + self.assertEqual(res, list(range(10))) + + def test_join_native_raises(self): + ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) + ts.iter_native = Mock() + ts.iter_native.return_value = iter([ + (uuid(), {'status': states.FAILURE, 'result': KeyError()}) + ]) + with self.assertRaises(KeyError): + ts.join_native(propagate=True) + + def test_failed_join_report(self): + res = Mock() + ts = self.app.GroupResult(uuid(), [res]) + res.state = states.FAILURE + res.backend.is_cached.return_value = True + self.assertIs(next(ts._failed_join_report()), res) + res.backend.is_cached.return_value = False + with self.assertRaises(StopIteration): + next(ts._failed_join_report()) + + def test_repr(self): + self.assertTrue(repr( + self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) + )) + + def test_children_is_results(self): + ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) + self.assertIs(ts.children, ts.results) def test_iter_native(self): backend = SimpleBackend() - subtasks = [AsyncResult(uuid(), backend=backend) + subtasks = [self.app.AsyncResult(uuid(), backend=backend) for i in range(10)] - ts = GroupResult(uuid(), subtasks) + ts = self.app.GroupResult(uuid(), subtasks) backend.ids = [subtask.id for subtask in subtasks] self.assertEqual(len(list(ts.iter_native())), 10) def test_iterate_yields(self): - ar = MockAsyncResultSuccess(uuid()) - ar2 = MockAsyncResultSuccess(uuid()) - ts = GroupResult(uuid(), [ar, ar2]) - it = iter(ts) - self.assertEqual(it.next(), 42) - self.assertEqual(it.next(), 42) + ar = MockAsyncResultSuccess(uuid(), app=self.app) + ar2 = MockAsyncResultSuccess(uuid(), app=self.app) + ts = self.app.GroupResult(uuid(), [ar, ar2]) + it = ts.iterate() + self.assertEqual(next(it), 42) + self.assertEqual(next(it), 42) def test_iterate_eager(self): ar1 = EagerResult(uuid(), 42, states.SUCCESS) ar2 = EagerResult(uuid(), 42, states.SUCCESS) - ts = GroupResult(uuid(), [ar1, ar2]) - it = iter(ts) - self.assertEqual(it.next(), 42) - self.assertEqual(it.next(), 42) + ts = self.app.GroupResult(uuid(), [ar1, ar2]) + it = ts.iterate() + self.assertEqual(next(it), 42) + self.assertEqual(next(it), 42) def test_join_timeout(self): - ar = MockAsyncResultSuccess(uuid()) - ar2 = MockAsyncResultSuccess(uuid()) - ar3 = AsyncResult(uuid()) - ts = GroupResult(uuid(), [ar, ar2, ar3]) + ar = MockAsyncResultSuccess(uuid(), app=self.app) + ar2 = MockAsyncResultSuccess(uuid(), app=self.app) + ar3 = self.app.AsyncResult(uuid()) + ts = self.app.GroupResult(uuid(), [ar, ar2, ar3]) with self.assertRaises(TimeoutError): ts.join(timeout=0.0000001) - def test___iter__(self): - it = iter(self.ts) + ar4 = self.app.AsyncResult(uuid()) + ar4.get = Mock() + ts2 = self.app.GroupResult(uuid(), [ar4]) + self.assertTrue(ts2.join(timeout=0.1)) + + def test_iter_native_when_empty_group(self): + ts = self.app.GroupResult(uuid(), []) + self.assertListEqual(list(ts.iter_native()), []) + + def test_iterate_simple(self): + it = self.ts.iterate() results = sorted(list(it)) - self.assertListEqual(results, list(xrange(self.size))) + self.assertListEqual(results, 
list(range(self.size))) + + def test___iter__(self): + self.assertListEqual(list(iter(self.ts)), self.ts.results) def test_join(self): joined = self.ts.join() - self.assertListEqual(joined, list(xrange(self.size))) + self.assertListEqual(joined, list(range(self.size))) def test_successful(self): self.assertTrue(self.ts.successful()) @@ -482,7 +590,7 @@ class test_GroupResult(AppCase): class test_pending_AsyncResult(AppCase): def setup(self): - self.task = AsyncResult(uuid()) + self.task = self.app.AsyncResult(uuid()) def test_result(self): self.assertIsNone(self.task.result) @@ -492,17 +600,17 @@ class test_failed_AsyncResult(test_GroupResult): def setup(self): self.size = 11 - subtasks = make_mock_group(10) + subtasks = make_mock_group(self.app, 10) failed = mock_task('ts11', states.FAILURE, KeyError('Baz')) - save_result(failed) - failed_res = AsyncResult(failed['id']) - self.ts = GroupResult(uuid(), subtasks + [failed_res]) + save_result(self.app, failed) + failed_res = self.app.AsyncResult(failed['id']) + self.ts = self.app.GroupResult(uuid(), subtasks + [failed_res]) def test_completed_count(self): self.assertEqual(self.ts.completed_count(), len(self.ts) - 1) - def test___iter__(self): - it = iter(self.ts) + def test_iterate_simple(self): + it = self.ts.iterate() def consume(): return list(it) @@ -524,8 +632,9 @@ class test_failed_AsyncResult(test_GroupResult): class test_pending_Group(AppCase): def setup(self): - self.ts = GroupResult(uuid(), [AsyncResult(uuid()), - AsyncResult(uuid())]) + self.ts = self.app.GroupResult( + uuid(), [self.app.AsyncResult(uuid()), + self.app.AsyncResult(uuid())]) def test_completed_count(self): self.assertEqual(self.ts.completed_count(), 0) @@ -540,22 +649,22 @@ class test_pending_Group(AppCase): with self.assertRaises(TimeoutError): self.ts.join(timeout=0.001) - @skip_if_quick def x_join_longer(self): with self.assertRaises(TimeoutError): self.ts.join(timeout=1) -class RaisingTask(Task): - - def run(self, x, y): - raise KeyError('xy') - - class test_EagerResult(AppCase): + def setup(self): + + @self.app.task(shared=False) + def raising(x, y): + raise KeyError(x, y) + self.raising = raising + def test_wait_raises(self): - res = RaisingTask.apply(args=[3, 3]) + res = self.raising.apply(args=[3, 3]) with self.assertRaises(KeyError): res.wait() self.assertTrue(res.wait(propagate=False)) @@ -571,18 +680,33 @@ class test_EagerResult(AppCase): res.forget() def test_revoke(self): - res = RaisingTask.apply(args=[3, 3]) + res = self.raising.apply(args=[3, 3]) self.assertFalse(res.revoke()) -class test_serializable(AppCase): +class test_tuples(AppCase): def test_AsyncResult(self): - x = AsyncResult(uuid()) - self.assertEqual(x, from_serializable(x.serializable(), self.app)) - self.assertEqual(x, from_serializable(x, self.app)) + x = self.app.AsyncResult(uuid()) + self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app)) + self.assertEqual(x, result_from_tuple(x, self.app)) + + def test_with_parent(self): + x = self.app.AsyncResult(uuid()) + x.parent = self.app.AsyncResult(uuid()) + y = result_from_tuple(x.as_tuple(), self.app) + self.assertEqual(y, x) + self.assertEqual(y.parent, x.parent) + self.assertIsInstance(y.parent, AsyncResult) + + def test_compat(self): + uid = uuid() + x = result_from_tuple([uid, []], app=self.app) + self.assertEqual(x.id, uid) def test_GroupResult(self): - x = GroupResult(uuid(), [AsyncResult(uuid()) for _ in range(10)]) - self.assertEqual(x, from_serializable(x.serializable(), self.app)) - self.assertEqual(x, from_serializable(x, 
self.app)) + x = self.app.GroupResult( + uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)], + ) + self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app)) + self.assertEqual(x, result_from_tuple(x, self.app)) diff --git a/awx/lib/site-packages/celery/tests/tasks/test_states.py b/awx/lib/site-packages/celery/tests/tasks/test_states.py index 4acf8aafee..b30a4ee6a5 100644 --- a/awx/lib/site-packages/celery/tests/tasks/test_states.py +++ b/awx/lib/site-packages/celery/tests/tasks/test_states.py @@ -2,7 +2,7 @@ from __future__ import absolute_import from celery.states import state from celery import states -from celery.tests.utils import Case +from celery.tests.case import Case class test_state_precedence(Case): diff --git a/awx/lib/site-packages/celery/tests/tasks/test_tasks.py b/awx/lib/site-packages/celery/tests/tasks/test_tasks.py index 65e6e77862..7269794f37 100644 --- a/awx/lib/site-packages/celery/tests/tasks/test_tasks.py +++ b/awx/lib/site-packages/celery/tests/tasks/test_tasks.py @@ -1,36 +1,19 @@ from __future__ import absolute_import -from __future__ import with_statement - -import time +from collections import Callable from datetime import datetime, timedelta -from functools import wraps -from mock import patch -from pickle import loads, dumps -from celery.task import ( - current, - task, - Task, - BaseTask, - TaskSet, - periodic_task, - PeriodicTask -) -from celery import current_app -from celery.app import app_or_default -from celery.exceptions import RetryTaskError -from celery.execute import send_task +from kombu import Queue + +from celery import Task + +from celery.exceptions import Retry +from celery.five import items, range, string_t from celery.result import EagerResult -from celery.schedules import crontab, crontab_parser, ParseException from celery.utils import uuid -from celery.utils.timeutils import parse_iso8601, timedelta_seconds +from celery.utils.timeutils import parse_iso8601 -from celery.tests.utils import Case, with_eager_tasks, WhateverIO - - -def now(): - return current_app.now() +from celery.tests.case import AppCase, depends_on_current_app, patch def return_True(*args, **kwargs): @@ -38,234 +21,236 @@ def return_True(*args, **kwargs): return True -return_True_task = task()(return_True) - - def raise_exception(self, **kwargs): raise Exception('%s error' % self.__class__) class MockApplyTask(Task): + abstract = True applied = 0 def run(self, x, y): return x * y - @classmethod def apply_async(self, *args, **kwargs): self.applied += 1 -@task(name='c.unittest.increment_counter_task', count=0) -def increment_counter(increment_by=1): - increment_counter.count += increment_by or 1 - return increment_counter.count +class TasksCase(AppCase): + def setup(self): + self.mytask = self.app.task(shared=False)(return_True) -@task(name='c.unittest.raising_task') -def raising(): - raise KeyError('foo') + @self.app.task(bind=True, count=0, shared=False) + def increment_counter(self, increment_by=1): + self.count += increment_by or 1 + return self.count + self.increment_counter = increment_counter + @self.app.task(shared=False) + def raising(): + raise KeyError('foo') + self.raising = raising -@task(max_retries=3, iterations=0) -def retry_task(arg1, arg2, kwarg=1, max_retries=None, care=True): - current.iterations += 1 - rmax = current.max_retries if max_retries is None else max_retries + @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) + def retry_task(self, arg1, arg2, kwarg=1, max_retries=None, care=True): + self.iterations += 1 + rmax = 
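The test_tuples hunks above replace serializable()/from_serializable() with as_tuple()/result_from_tuple(). A sketch of the round-trip, again with an illustrative app:

from celery import Celery, uuid
from celery.result import result_from_tuple

app = Celery('sketch', backend='cache+memory://')

res = app.AsyncResult(uuid())
payload = res.as_tuple()          # nested tuples; safe to serialize
assert result_from_tuple(payload, app) == res
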
self.max_retries if max_retries is None else max_retries - assert repr(current.request) - retries = current.request.retries - if care and retries >= rmax: - return arg1 - else: - raise current.retry(countdown=0, max_retries=rmax) + assert repr(self.request) + retries = self.request.retries + if care and retries >= rmax: + return arg1 + else: + raise self.retry(countdown=0, max_retries=rmax) + self.retry_task = retry_task + @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) + def retry_task_noargs(self, **kwargs): + self.iterations += 1 -@task(max_retries=3, iterations=0, accept_magic_kwargs=True) -def retry_task_noargs(**kwargs): - current.iterations += 1 + if self.request.retries >= 3: + return 42 + else: + raise self.retry(countdown=0) + self.retry_task_noargs = retry_task_noargs - retries = kwargs['task_retries'] - if retries >= 3: - return 42 - else: - raise current.retry(countdown=0) + @self.app.task(bind=True, max_retries=3, iterations=0, + base=MockApplyTask, shared=False) + def retry_task_mockapply(self, arg1, arg2, kwarg=1): + self.iterations += 1 + retries = self.request.retries + if retries >= 3: + return arg1 + raise self.retry(countdown=0) + self.retry_task_mockapply = retry_task_mockapply -@task(max_retries=3, iterations=0, base=MockApplyTask, - accept_magic_kwargs=True) -def retry_task_mockapply(arg1, arg2, kwarg=1, **kwargs): - current.iterations += 1 + @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) + def retry_task_customexc(self, arg1, arg2, kwarg=1, **kwargs): + self.iterations += 1 - retries = kwargs['task_retries'] - if retries >= 3: - return arg1 - else: - kwargs.update(kwarg=kwarg) - raise current.retry(countdown=0) + retries = self.request.retries + if retries >= 3: + return arg1 + kwarg + else: + try: + raise MyCustomException('Elaine Marie Benes') + except MyCustomException as exc: + kwargs.update(kwarg=kwarg) + raise self.retry(countdown=0, exc=exc) + self.retry_task_customexc = retry_task_customexc class MyCustomException(Exception): """Random custom exception.""" -@task(max_retries=3, iterations=0, accept_magic_kwargs=True) -def retry_task_customexc(arg1, arg2, kwarg=1, **kwargs): - current.iterations += 1 - - retries = kwargs['task_retries'] - if retries >= 3: - return arg1 + kwarg - else: - try: - raise MyCustomException('Elaine Marie Benes') - except MyCustomException, exc: - kwargs.update(kwarg=kwarg) - raise current.retry(countdown=0, exc=exc) - - -class test_task_retries(Case): +class test_task_retries(TasksCase): def test_retry(self): - retry_task.__class__.max_retries = 3 - retry_task.iterations = 0 - retry_task.apply([0xFF, 0xFFFF]) - self.assertEqual(retry_task.iterations, 4) + self.retry_task.max_retries = 3 + self.retry_task.iterations = 0 + self.retry_task.apply([0xFF, 0xFFFF]) + self.assertEqual(self.retry_task.iterations, 4) - retry_task.__class__.max_retries = 3 - retry_task.iterations = 0 - retry_task.apply([0xFF, 0xFFFF], {'max_retries': 10}) - self.assertEqual(retry_task.iterations, 11) + self.retry_task.max_retries = 3 + self.retry_task.iterations = 0 + self.retry_task.apply([0xFF, 0xFFFF], {'max_retries': 10}) + self.assertEqual(self.retry_task.iterations, 11) def test_retry_no_args(self): - assert retry_task_noargs.accept_magic_kwargs - retry_task_noargs.__class__.max_retries = 3 - retry_task_noargs.iterations = 0 - retry_task_noargs.apply() - self.assertEqual(retry_task_noargs.iterations, 4) + self.retry_task_noargs.max_retries = 3 + self.retry_task_noargs.iterations = 0 + 
self.retry_task_noargs.apply(propagate=True).get() + self.assertEqual(self.retry_task_noargs.iterations, 4) def test_retry_kwargs_can_be_empty(self): - retry_task_mockapply.push_request() + self.retry_task_mockapply.push_request() try: - with self.assertRaises(RetryTaskError): - retry_task_mockapply.retry(args=[4, 4], kwargs=None) + with self.assertRaises(Retry): + self.retry_task_mockapply.retry(args=[4, 4], kwargs=None) finally: - retry_task_mockapply.pop_request() + self.retry_task_mockapply.pop_request() def test_retry_not_eager(self): - retry_task_mockapply.push_request() + self.retry_task_mockapply.push_request() try: - retry_task_mockapply.request.called_directly = False + self.retry_task_mockapply.request.called_directly = False exc = Exception('baz') try: - retry_task_mockapply.retry( + self.retry_task_mockapply.retry( args=[4, 4], kwargs={'task_retries': 0}, exc=exc, throw=False, ) - self.assertTrue(retry_task_mockapply.__class__.applied) + self.assertTrue(self.retry_task_mockapply.applied) finally: - retry_task_mockapply.__class__.applied = 0 + self.retry_task_mockapply.applied = 0 try: - with self.assertRaises(RetryTaskError): - retry_task_mockapply.retry( + with self.assertRaises(Retry): + self.retry_task_mockapply.retry( args=[4, 4], kwargs={'task_retries': 0}, exc=exc, throw=True) - self.assertTrue(retry_task_mockapply.__class__.applied) + self.assertTrue(self.retry_task_mockapply.applied) finally: - retry_task_mockapply.__class__.applied = 0 + self.retry_task_mockapply.applied = 0 finally: - retry_task_mockapply.pop_request() + self.retry_task_mockapply.pop_request() def test_retry_with_kwargs(self): - retry_task_customexc.__class__.max_retries = 3 - retry_task_customexc.iterations = 0 - retry_task_customexc.apply([0xFF, 0xFFFF], {'kwarg': 0xF}) - self.assertEqual(retry_task_customexc.iterations, 4) + self.retry_task_customexc.max_retries = 3 + self.retry_task_customexc.iterations = 0 + self.retry_task_customexc.apply([0xFF, 0xFFFF], {'kwarg': 0xF}) + self.assertEqual(self.retry_task_customexc.iterations, 4) def test_retry_with_custom_exception(self): - retry_task_customexc.__class__.max_retries = 2 - retry_task_customexc.iterations = 0 - result = retry_task_customexc.apply([0xFF, 0xFFFF], {'kwarg': 0xF}) + self.retry_task_customexc.max_retries = 2 + self.retry_task_customexc.iterations = 0 + result = self.retry_task_customexc.apply( + [0xFF, 0xFFFF], {'kwarg': 0xF}, + ) with self.assertRaises(MyCustomException): result.get() - self.assertEqual(retry_task_customexc.iterations, 3) + self.assertEqual(self.retry_task_customexc.iterations, 3) def test_max_retries_exceeded(self): - retry_task.__class__.max_retries = 2 - retry_task.iterations = 0 - result = retry_task.apply([0xFF, 0xFFFF], {'care': False}) - with self.assertRaises(retry_task.MaxRetriesExceededError): + self.retry_task.max_retries = 2 + self.retry_task.iterations = 0 + result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) + with self.assertRaises(self.retry_task.MaxRetriesExceededError): result.get() - self.assertEqual(retry_task.iterations, 3) + self.assertEqual(self.retry_task.iterations, 3) - retry_task.__class__.max_retries = 1 - retry_task.iterations = 0 - result = retry_task.apply([0xFF, 0xFFFF], {'care': False}) - with self.assertRaises(retry_task.MaxRetriesExceededError): + self.retry_task.max_retries = 1 + self.retry_task.iterations = 0 + result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) + with self.assertRaises(self.retry_task.MaxRetriesExceededError): result.get() - 
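The retry tests now use bound tasks: the task receives self and calls self.retry(), which raises celery.exceptions.Retry, the Celery 3.x name for RetryTaskError. A sketch of the pattern; do_work and TransientError are hypothetical stand-ins:

from celery import Celery

app = Celery('sketch', broker='memory://')

class TransientError(Exception):
    """Stand-in for whatever transient failure the real task sees."""

def do_work(x):
    """Hypothetical helper that sometimes fails."""
    raise TransientError(x)

@app.task(bind=True, max_retries=3)
def flaky(self, x):
    try:
        return do_work(x)
    except TransientError as exc:
        # Re-raises as celery.exceptions.Retry until max_retries is hit.
        raise self.retry(exc=exc, countdown=0)
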
self.assertEqual(retry_task.iterations, 2) + self.assertEqual(self.retry_task.iterations, 2) -class test_canvas_utils(Case): +class test_canvas_utils(TasksCase): def test_si(self): - self.assertTrue(retry_task.si()) - self.assertTrue(retry_task.si().immutable) + self.assertTrue(self.retry_task.si()) + self.assertTrue(self.retry_task.si().immutable) def test_chunks(self): - self.assertTrue(retry_task.chunks(range(100), 10)) + self.assertTrue(self.retry_task.chunks(range(100), 10)) def test_map(self): - self.assertTrue(retry_task.map(range(100))) + self.assertTrue(self.retry_task.map(range(100))) def test_starmap(self): - self.assertTrue(retry_task.starmap(range(100))) + self.assertTrue(self.retry_task.starmap(range(100))) def test_on_success(self): - retry_task.on_success(1, 1, (), {}) + self.retry_task.on_success(1, 1, (), {}) -class test_tasks(Case): +class test_tasks(TasksCase): + def now(self): + return self.app.now() + + @depends_on_current_app def test_unpickle_task(self): import pickle - @task + @self.app.task(shared=True) def xxx(): pass - self.assertIs(pickle.loads(pickle.dumps(xxx)), xxx.app.tasks[xxx.name]) - def createTask(self, name): - return task(__module__=self.__module__, name=name)(return_True) - def test_AsyncResult(self): task_id = uuid() - result = retry_task.AsyncResult(task_id) - self.assertEqual(result.backend, retry_task.backend) + result = self.retry_task.AsyncResult(task_id) + self.assertEqual(result.backend, self.retry_task.backend) self.assertEqual(result.id, task_id) def assertNextTaskDataEqual(self, consumer, presult, task_name, test_eta=False, test_expires=False, **kwargs): - next_task = consumer.queues[0].get() + next_task = consumer.queues[0].get(accept=['pickle']) task_data = next_task.decode() self.assertEqual(task_data['id'], presult.id) self.assertEqual(task_data['task'], task_name) task_kwargs = task_data.get('kwargs', {}) if test_eta: - self.assertIsInstance(task_data.get('eta'), basestring) + self.assertIsInstance(task_data.get('eta'), string_t) to_datetime = parse_iso8601(task_data.get('eta')) self.assertIsInstance(to_datetime, datetime) if test_expires: - self.assertIsInstance(task_data.get('expires'), basestring) + self.assertIsInstance(task_data.get('expires'), string_t) to_datetime = parse_iso8601(task_data.get('expires')) self.assertIsInstance(to_datetime, datetime) - for arg_name, arg_value in kwargs.items(): + for arg_name, arg_value in items(kwargs): self.assertEqual(task_kwargs.get(arg_name), arg_value) def test_incomplete_task_cls(self): class IncompleteTask(Task): + app = self.app name = 'c.unittest.t.itask' with self.assertRaises(NotImplementedError): @@ -273,140 +258,137 @@ class test_tasks(Case): def test_task_kwargs_must_be_dictionary(self): with self.assertRaises(ValueError): - increment_counter.apply_async([], 'str') + self.increment_counter.apply_async([], 'str') def test_task_args_must_be_list(self): with self.assertRaises(ValueError): - increment_counter.apply_async('str', {}) + self.increment_counter.apply_async('str', {}) def test_regular_task(self): - T1 = self.createTask('c.unittest.t.t1') - self.assertIsInstance(T1, BaseTask) - self.assertTrue(T1.run()) - self.assertTrue(callable(T1), 'Task class is callable()') - self.assertTrue(T1(), 'Task class runs run() when called') + self.assertIsInstance(self.mytask, Task) + self.assertTrue(self.mytask.run()) + self.assertTrue(isinstance(self.mytask, Callable), + 'Task class is callable()') + self.assertTrue(self.mytask(), 'Task class runs run() when called') - consumer = 
T1.get_consumer() - with self.assertRaises(NotImplementedError): - consumer.receive('foo', 'foo') - consumer.purge() - self.assertIsNone(consumer.queues[0].get()) + with self.app.connection_or_acquire() as conn: + consumer = self.app.amqp.TaskConsumer(conn) + with self.assertRaises(NotImplementedError): + consumer.receive('foo', 'foo') + consumer.purge() + self.assertIsNone(consumer.queues[0].get()) + self.app.amqp.TaskConsumer(conn, queues=[Queue('foo')]) - # Without arguments. - presult = T1.delay() - self.assertNextTaskDataEqual(consumer, presult, T1.name) + # Without arguments. + presult = self.mytask.delay() + self.assertNextTaskDataEqual(consumer, presult, self.mytask.name) - # With arguments. - presult2 = T1.apply_async(kwargs=dict(name='George Costanza')) - self.assertNextTaskDataEqual( - consumer, presult2, T1.name, name='George Costanza', - ) + # With arguments. + presult2 = self.mytask.apply_async( + kwargs=dict(name='George Costanza'), + ) + self.assertNextTaskDataEqual( + consumer, presult2, self.mytask.name, name='George Costanza', + ) - # send_task - sresult = send_task(T1.name, kwargs=dict(name='Elaine M. Benes')) - self.assertNextTaskDataEqual( - consumer, sresult, T1.name, name='Elaine M. Benes', - ) + # send_task + sresult = self.app.send_task(self.mytask.name, + kwargs=dict(name='Elaine M. Benes')) + self.assertNextTaskDataEqual( + consumer, sresult, self.mytask.name, name='Elaine M. Benes', + ) - # With eta. - presult2 = T1.apply_async( - kwargs=dict(name='George Costanza'), - eta=now() + timedelta(days=1), - expires=now() + timedelta(days=2), - ) - self.assertNextTaskDataEqual( - consumer, presult2, T1.name, - name='George Costanza', test_eta=True, test_expires=True, - ) + # With eta. + presult2 = self.mytask.apply_async( + kwargs=dict(name='George Costanza'), + eta=self.now() + timedelta(days=1), + expires=self.now() + timedelta(days=2), + ) + self.assertNextTaskDataEqual( + consumer, presult2, self.mytask.name, + name='George Costanza', test_eta=True, test_expires=True, + ) - # With countdown. - presult2 = T1.apply_async(kwargs=dict(name='George Costanza'), - countdown=10, expires=12) - self.assertNextTaskDataEqual( - consumer, presult2, T1.name, - name='George Costanza', test_eta=True, test_expires=True, - ) + # With countdown. + presult2 = self.mytask.apply_async( + kwargs=dict(name='George Costanza'), countdown=10, expires=12, + ) + self.assertNextTaskDataEqual( + consumer, presult2, self.mytask.name, + name='George Costanza', test_eta=True, test_expires=True, + ) - # Discarding all tasks. - consumer.purge() - T1.apply_async() - self.assertEqual(consumer.purge(), 1) - self.assertIsNone(consumer.queues[0].get()) + # Discarding all tasks. 
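test_regular_task above now draws a connection from the app pool instead of using the removed Task.get_consumer(). In outline, assuming the same illustrative app and the default 'celery' queue:

from celery import Celery

app = Celery('sketch', broker='memory://')

with app.connection_or_acquire() as conn:
    consumer = app.amqp.TaskConsumer(conn)
    consumer.purge()                          # drop anything already queued
    assert consumer.queues[0].get() is None   # nothing pending now
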
+ consumer.purge() + self.mytask.apply_async() + self.assertEqual(consumer.purge(), 1) + self.assertIsNone(consumer.queues[0].get()) - self.assertFalse(presult.successful()) - T1.backend.mark_as_done(presult.id, result=None) - self.assertTrue(presult.successful()) + self.assertFalse(presult.successful()) + self.mytask.backend.mark_as_done(presult.id, result=None) + self.assertTrue(presult.successful()) - publisher = T1.get_publisher() - self.assertTrue(publisher.exchange) + def test_repr_v2_compat(self): + self.mytask.__v2_compat__ = True + self.assertIn('v2 compatible', repr(self.mytask)) + + def test_apply_with_self(self): + + @self.app.task(__self__=42, shared=False) + def tawself(self): + return self + + self.assertEqual(tawself.apply().get(), 42) + + self.assertEqual(tawself(), 42) def test_context_get(self): - task = self.createTask('c.unittest.t.c.g') - task.push_request() + self.mytask.push_request() try: - request = task.request + request = self.mytask.request request.foo = 32 self.assertEqual(request.get('foo'), 32) self.assertEqual(request.get('bar', 36), 36) request.clear() finally: - task.pop_request() + self.mytask.pop_request() def test_task_class_repr(self): - task = self.createTask('c.unittest.t.repr') - self.assertIn('class Task of', repr(task.app.Task)) - prev, task.app.Task._app = task.app.Task._app, None - try: - self.assertIn('unbound', repr(task.app.Task, )) - finally: - task.app.Task._app = prev + self.assertIn('class Task of', repr(self.mytask.app.Task)) + self.mytask.app.Task._app = None + self.assertIn('unbound', repr(self.mytask.app.Task, )) def test_bind_no_magic_kwargs(self): - task = self.createTask('c.unittest.t.magic_kwargs') - task.__class__.accept_magic_kwargs = None - task.bind(task.app) + self.mytask.accept_magic_kwargs = None + self.mytask.bind(self.mytask.app) def test_annotate(self): with patch('celery.app.task.resolve_all_annotations') as anno: anno.return_value = [{'FOO': 'BAR'}] - Task.annotate() - self.assertEqual(Task.FOO, 'BAR') + + @self.app.task(shared=False) + def task(): + pass + task.annotate() + self.assertEqual(task.FOO, 'BAR') def test_after_return(self): - task = self.createTask('c.unittest.t.after_return') - task.push_request() + self.mytask.push_request() try: - task.request.chord = return_True_task.s() - task.after_return('SUCCESS', 1.0, 'foobar', (), {}, None) - task.request.clear() + self.mytask.request.chord = self.mytask.s() + self.mytask.after_return('SUCCESS', 1.0, 'foobar', (), {}, None) + self.mytask.request.clear() finally: - task.pop_request() + self.mytask.pop_request() def test_send_task_sent_event(self): - T1 = self.createTask('c.unittest.t.t1') - app = T1.app - with app.connection() as conn: - app.conf.CELERY_SEND_TASK_SENT_EVENT = True - del(app.amqp.__dict__['TaskProducer']) - try: - self.assertTrue(app.amqp.TaskProducer(conn).send_sent_event) - finally: - app.conf.CELERY_SEND_TASK_SENT_EVENT = False - del(app.amqp.__dict__['TaskProducer']) - - def test_get_publisher(self): - connection = app_or_default().connection() - p = increment_counter.get_publisher(connection, auto_declare=False, - exchange='foo') - self.assertEqual(p.exchange.name, 'foo') - p = increment_counter.get_publisher(connection, auto_declare=False, - exchange='foo', - exchange_type='fanout') - self.assertEqual(p.exchange.type, 'fanout') + with self.app.connection() as conn: + self.app.conf.CELERY_SEND_TASK_SENT_EVENT = True + self.assertTrue(self.app.amqp.TaskProducer(conn).send_sent_event) def test_update_state(self): - @task + 
@self.app.task(shared=False) def yyy(): pass @@ -426,7 +408,7 @@ class test_tasks(Case): def test_repr(self): - @task + @self.app.task(shared=False) def task_test_repr(): pass @@ -434,838 +416,44 @@ class test_tasks(Case): def test_has___name__(self): - @task + @self.app.task(shared=False) def yyy2(): pass self.assertTrue(yyy2.__name__) - def test_get_logger(self): - t1 = self.createTask('c.unittest.t.t1') - t1.push_request() - try: - logfh = WhateverIO() - logger = t1.get_logger(logfile=logfh, loglevel=0) - self.assertTrue(logger) - t1.request.loglevel = 3 - logger = t1.get_logger(logfile=logfh, loglevel=None) - self.assertTrue(logger) - finally: - t1.pop_request() - - -class test_TaskSet(Case): - - @with_eager_tasks - def test_function_taskset(self): - subtasks = [return_True_task.s(i) for i in range(1, 6)] - ts = TaskSet(subtasks) - res = ts.apply_async() - self.assertListEqual(res.join(), [True, True, True, True, True]) - - def test_counter_taskset(self): - increment_counter.count = 0 - ts = TaskSet(tasks=[ - increment_counter.s(), - increment_counter.s(increment_by=2), - increment_counter.s(increment_by=3), - increment_counter.s(increment_by=4), - increment_counter.s(increment_by=5), - increment_counter.s(increment_by=6), - increment_counter.s(increment_by=7), - increment_counter.s(increment_by=8), - increment_counter.s(increment_by=9), - ]) - self.assertEqual(ts.total, 9) - - consumer = increment_counter.get_consumer() - consumer.purge() - consumer.close() - taskset_res = ts.apply_async() - subtasks = taskset_res.subtasks - taskset_id = taskset_res.taskset_id - consumer = increment_counter.get_consumer() - for subtask in subtasks: - m = consumer.queues[0].get().payload - self.assertDictContainsSubset({'taskset': taskset_id, - 'task': increment_counter.name, - 'id': subtask.id}, m) - increment_counter( - increment_by=m.get('kwargs', {}).get('increment_by')) - self.assertEqual(increment_counter.count, sum(xrange(1, 10))) - - def test_named_taskset(self): - prefix = 'test_named_taskset-' - ts = TaskSet([return_True_task.subtask([1])]) - res = ts.apply(taskset_id=prefix + uuid()) - self.assertTrue(res.taskset_id.startswith(prefix)) - - -class test_apply_task(Case): +class test_apply_task(TasksCase): def test_apply_throw(self): with self.assertRaises(KeyError): - raising.apply(throw=True) - - def test_apply_no_magic_kwargs(self): - increment_counter.accept_magic_kwargs = False - try: - increment_counter.apply() - finally: - increment_counter.accept_magic_kwargs = True + self.raising.apply(throw=True) def test_apply_with_CELERY_EAGER_PROPAGATES_EXCEPTIONS(self): - raising.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True - try: - with self.assertRaises(KeyError): - raising.apply() - finally: - raising.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = False + self.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True + with self.assertRaises(KeyError): + self.raising.apply() def test_apply(self): - increment_counter.count = 0 + self.increment_counter.count = 0 - e = increment_counter.apply() + e = self.increment_counter.apply() self.assertIsInstance(e, EagerResult) self.assertEqual(e.get(), 1) - e = increment_counter.apply(args=[1]) + e = self.increment_counter.apply(args=[1]) self.assertEqual(e.get(), 2) - e = increment_counter.apply(kwargs={'increment_by': 4}) + e = self.increment_counter.apply(kwargs={'increment_by': 4}) self.assertEqual(e.get(), 6) self.assertTrue(e.successful()) self.assertTrue(e.ready()) self.assertTrue(repr(e).startswith('= 3: - raise - else: - break - - def 
test_every_minute_execution_is_due(self): - last_ran = self.now - timedelta(seconds=61) - due, remaining = every_minute.run_every.is_due(last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - def test_every_minute_execution_is_not_due(self): - last_ran = self.now - timedelta(seconds=self.now.second) - due, remaining = every_minute.run_every.is_due(last_ran) - self.assertFalse(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - # 29th of May 2010 is a saturday - @patch_crontab_nowfun(hourly, datetime(2010, 5, 29, 10, 30)) - def test_execution_is_due_on_saturday(self): - last_ran = self.now - timedelta(seconds=61) - due, remaining = every_minute.run_every.is_due(last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - # 30th of May 2010 is a sunday - @patch_crontab_nowfun(hourly, datetime(2010, 5, 30, 10, 30)) - def test_execution_is_due_on_sunday(self): - last_ran = self.now - timedelta(seconds=61) - due, remaining = every_minute.run_every.is_due(last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - # 31st of May 2010 is a monday - @patch_crontab_nowfun(hourly, datetime(2010, 5, 31, 10, 30)) - def test_execution_is_due_on_monday(self): - last_ran = self.now - timedelta(seconds=61) - due, remaining = every_minute.run_every.is_due(last_ran) - self.assertTrue(due) - self.seconds_almost_equal(remaining, self.next_minute, 1) - - @patch_crontab_nowfun(hourly, datetime(2010, 5, 10, 10, 30)) - def test_every_hour_execution_is_due(self): - due, remaining = hourly.run_every.is_due( - datetime(2010, 5, 10, 6, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 60 * 60) - - @patch_crontab_nowfun(hourly, datetime(2010, 5, 10, 10, 29)) - def test_every_hour_execution_is_not_due(self): - due, remaining = hourly.run_every.is_due( - datetime(2010, 5, 10, 9, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 60) - - @patch_crontab_nowfun(quarterly, datetime(2010, 5, 10, 10, 15)) - def test_first_quarter_execution_is_due(self): - due, remaining = quarterly.run_every.is_due( - datetime(2010, 5, 10, 6, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 15 * 60) - - @patch_crontab_nowfun(quarterly, datetime(2010, 5, 10, 10, 30)) - def test_second_quarter_execution_is_due(self): - due, remaining = quarterly.run_every.is_due( - datetime(2010, 5, 10, 6, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 15 * 60) - - @patch_crontab_nowfun(quarterly, datetime(2010, 5, 10, 10, 14)) - def test_first_quarter_execution_is_not_due(self): - due, remaining = quarterly.run_every.is_due( - datetime(2010, 5, 10, 10, 0)) - self.assertFalse(due) - self.assertEqual(remaining, 60) - - @patch_crontab_nowfun(quarterly, datetime(2010, 5, 10, 10, 29)) - def test_second_quarter_execution_is_not_due(self): - due, remaining = quarterly.run_every.is_due( - datetime(2010, 5, 10, 10, 15)) - self.assertFalse(due) - self.assertEqual(remaining, 60) - - @patch_crontab_nowfun(daily, datetime(2010, 5, 10, 7, 30)) - def test_daily_execution_is_due(self): - due, remaining = daily.run_every.is_due( - datetime(2010, 5, 9, 7, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 24 * 60 * 60) - - @patch_crontab_nowfun(daily, datetime(2010, 5, 10, 10, 30)) - def test_daily_execution_is_not_due(self): - due, remaining = daily.run_every.is_due( - datetime(2010, 5, 10, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 21 * 60 * 60) - - @patch_crontab_nowfun(weekly, 
datetime(2010, 5, 6, 7, 30)) - def test_weekly_execution_is_due(self): - due, remaining = weekly.run_every.is_due( - datetime(2010, 4, 30, 7, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 7 * 24 * 60 * 60) - - @patch_crontab_nowfun(weekly, datetime(2010, 5, 7, 10, 30)) - def test_weekly_execution_is_not_due(self): - due, remaining = weekly.run_every.is_due( - datetime(2010, 5, 6, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 6 * 24 * 60 * 60 - 3 * 60 * 60) - - @patch_crontab_nowfun(monthly, datetime(2010, 5, 13, 7, 30)) - def test_monthly_execution_is_due(self): - due, remaining = monthly.run_every.is_due( - datetime(2010, 4, 8, 7, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 28 * 24 * 60 * 60) - - @patch_crontab_nowfun(monthly, datetime(2010, 5, 9, 10, 30)) - def test_monthly_execution_is_not_due(self): - due, remaining = monthly.run_every.is_due( - datetime(2010, 4, 8, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 4 * 24 * 60 * 60 - 3 * 60 * 60) - - @patch_crontab_nowfun(monthly_moy, datetime(2014, 2, 26, 22, 0)) - def test_monthly_moy_execution_is_due(self): - due, remaining = monthly_moy.run_every.is_due( - datetime(2013, 7, 4, 10, 0)) - self.assertTrue(due) - self.assertEqual(remaining, 60.) - - @patch_crontab_nowfun(monthly_moy, datetime(2013, 6, 28, 14, 30)) - def test_monthly_moy_execution_is_not_due(self): - due, remaining = monthly_moy.run_every.is_due( - datetime(2013, 6, 28, 22, 14)) - self.assertFalse(due) - attempt = ( - time.mktime(datetime(2014, 2, 26, 22, 0).timetuple()) - - time.mktime(datetime(2013, 6, 28, 14, 30).timetuple()) - - 60 * 60 - ) - self.assertEqual(remaining, attempt) - - @patch_crontab_nowfun(monthly_moy, datetime(2014, 2, 26, 22, 0)) - def test_monthly_moy_execution_is_due2(self): - due, remaining = monthly_moy.run_every.is_due( - datetime(2013, 2, 28, 10, 0)) - self.assertTrue(due) - self.assertEqual(remaining, 60.) 
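The deleted assertions above pinned crontab.is_due(), which takes the time of the last run and returns a (due, seconds-until-next-check) pair. A compressed sketch:

from datetime import datetime, timedelta
from celery.schedules import crontab

hourly = crontab(minute=30)        # run at half past every hour
due, next_check = hourly.is_due(datetime.utcnow() - timedelta(hours=2))
# `due` is True when a run is owed; `next_check` is seconds to wait.
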
- - @patch_crontab_nowfun(monthly_moy, datetime(2014, 2, 26, 21, 0)) - def test_monthly_moy_execution_is_not_due2(self): - due, remaining = monthly_moy.run_every.is_due( - datetime(2013, 6, 28, 22, 14)) - self.assertFalse(due) - attempt = 60 * 60 - self.assertEqual(remaining, attempt) - - @patch_crontab_nowfun(yearly, datetime(2010, 3, 11, 7, 30)) - def test_yearly_execution_is_due(self): - due, remaining = yearly.run_every.is_due( - datetime(2009, 3, 12, 7, 30)) - self.assertTrue(due) - self.assertEqual(remaining, 364 * 24 * 60 * 60) - - @patch_crontab_nowfun(yearly, datetime(2010, 3, 7, 10, 30)) - def test_yearly_execution_is_not_due(self): - due, remaining = yearly.run_every.is_due( - datetime(2009, 3, 12, 7, 30)) - self.assertFalse(due) - self.assertEqual(remaining, 4 * 24 * 60 * 60 - 3 * 60 * 60) diff --git a/awx/lib/site-packages/celery/tests/tasks/test_trace.py b/awx/lib/site-packages/celery/tests/tasks/test_trace.py index 558ba832f1..ef088d60dd 100644 --- a/awx/lib/site-packages/celery/tests/tasks/test_trace.py +++ b/awx/lib/site-packages/celery/tests/tasks/test_trace.py @@ -1,70 +1,158 @@ from __future__ import absolute_import -from __future__ import with_statement -from mock import patch - -from celery import current_app +from celery import uuid +from celery import signals from celery import states -from celery.exceptions import RetryTaskError -from celery.task.trace import TraceInfo, eager_trace_task, trace_task -from celery.tests.utils import Case, Mock +from celery.exceptions import Ignore, Retry +from celery.app.trace import ( + TraceInfo, + eager_trace_task, + trace_task, + setup_worker_optimizations, + reset_worker_optimizations, +) +from celery.tests.case import AppCase, Mock, patch -@current_app.task -def add(x, y): - return x + y - - -@current_app.task(ignore_result=True) -def add_cast(x, y): - return x + y - - -@current_app.task -def raises(exc): - raise exc - - -def trace(task, args=(), kwargs={}, propagate=False): +def trace(app, task, args=(), kwargs={}, propagate=False, **opts): return eager_trace_task(task, 'id-1', args, kwargs, - propagate=propagate) + propagate=propagate, app=app, **opts) -class test_trace(Case): +class TraceCase(AppCase): + + def setup(self): + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + + @self.app.task(shared=False, ignore_result=True) + def add_cast(x, y): + return x + y + self.add_cast = add_cast + + @self.app.task(shared=False) + def raises(exc): + raise exc + self.raises = raises + + def trace(self, *args, **kwargs): + return trace(self.app, *args, **kwargs) + + +class test_trace(TraceCase): def test_trace_successful(self): - retval, info = trace(add, (2, 2), {}) + retval, info = self.trace(self.add, (2, 2), {}) self.assertIsNone(info) self.assertEqual(retval, 4) + def test_trace_on_success(self): + + @self.app.task(shared=False, on_success=Mock()) + def add_with_success(x, y): + return x + y + + self.trace(add_with_success, (2, 2), {}) + self.assertTrue(add_with_success.on_success.called) + + def test_trace_after_return(self): + + @self.app.task(shared=False, after_return=Mock()) + def add_with_after_return(x, y): + return x + y + + self.trace(add_with_after_return, (2, 2), {}) + self.assertTrue(add_with_after_return.after_return.called) + + def test_with_prerun_receivers(self): + on_prerun = Mock() + signals.task_prerun.connect(on_prerun) + try: + self.trace(self.add, (2, 2), {}) + self.assertTrue(on_prerun.called) + finally: + signals.task_prerun.receivers[:] = [] + + def 
test_with_postrun_receivers(self): + on_postrun = Mock() + signals.task_postrun.connect(on_postrun) + try: + self.trace(self.add, (2, 2), {}) + self.assertTrue(on_postrun.called) + finally: + signals.task_postrun.receivers[:] = [] + + def test_with_success_receivers(self): + on_success = Mock() + signals.task_success.connect(on_success) + try: + self.trace(self.add, (2, 2), {}) + self.assertTrue(on_success.called) + finally: + signals.task_success.receivers[:] = [] + + def test_when_chord_part(self): + + @self.app.task(shared=False) + def add(x, y): + return x + y + add.backend = Mock() + + self.trace(add, (2, 2), {}, request={'chord': uuid()}) + add.backend.on_chord_part_return.assert_called_with(add) + + def test_when_backend_cleanup_raises(self): + + @self.app.task(shared=False) + def add(x, y): + return x + y + add.backend = Mock(name='backend') + add.backend.process_cleanup.side_effect = KeyError() + self.trace(add, (2, 2), {}, eager=False) + add.backend.process_cleanup.assert_called_with() + add.backend.process_cleanup.side_effect = MemoryError() + with self.assertRaises(MemoryError): + self.trace(add, (2, 2), {}, eager=False) + + def test_when_Ignore(self): + + @self.app.task(shared=False) + def ignored(): + raise Ignore() + + retval, info = self.trace(ignored, (), {}) + self.assertEqual(info.state, states.IGNORED) + def test_trace_SystemExit(self): with self.assertRaises(SystemExit): - trace(raises, (SystemExit(), ), {}) + self.trace(self.raises, (SystemExit(), ), {}) - def test_trace_RetryTaskError(self): - exc = RetryTaskError('foo', 'bar') - _, info = trace(raises, (exc, ), {}) + def test_trace_Retry(self): + exc = Retry('foo', 'bar') + _, info = self.trace(self.raises, (exc, ), {}) self.assertEqual(info.state, states.RETRY) self.assertIs(info.retval, exc) def test_trace_exception(self): exc = KeyError('foo') - _, info = trace(raises, (exc, ), {}) + _, info = self.trace(self.raises, (exc, ), {}) self.assertEqual(info.state, states.FAILURE) self.assertIs(info.retval, exc) def test_trace_exception_propagate(self): with self.assertRaises(KeyError): - trace(raises, (KeyError('foo'), ), {}, propagate=True) + self.trace(self.raises, (KeyError('foo'), ), {}, propagate=True) - @patch('celery.task.trace.build_tracer') - @patch('celery.task.trace.report_internal_error') + @patch('celery.app.trace.build_tracer') + @patch('celery.app.trace.report_internal_error') def test_outside_body_error(self, report_internal_error, build_tracer): tracer = Mock() tracer.side_effect = KeyError('foo') build_tracer.return_value = tracer - @current_app.task + @self.app.task(shared=False) def xtask(): pass @@ -73,7 +161,7 @@ class test_trace(Case): self.assertIs(xtask.__trace__, tracer) -class test_TraceInfo(Case): +class test_TraceInfo(TraceCase): class TI(TraceInfo): __slots__ = TraceInfo.__slots__ + ('__dict__', ) @@ -81,8 +169,24 @@ class test_TraceInfo(Case): def test_handle_error_state(self): x = self.TI(states.FAILURE) x.handle_failure = Mock() - x.handle_error_state(add_cast) + x.handle_error_state(self.add_cast) x.handle_failure.assert_called_with( - add_cast, - store_errors=add_cast.store_errors_even_if_ignored, + self.add_cast, + store_errors=self.add_cast.store_errors_even_if_ignored, ) + + +class test_stackprotection(AppCase): + + def test_stackprotection(self): + setup_worker_optimizations(self.app) + try: + @self.app.task(shared=False, bind=True) + def foo(self, i): + if i: + return foo(0) + return self.request + + self.assertTrue(foo(1).called_directly) + finally: + 
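The receiver tests above wire Mocks to the task lifecycle signals; a plain receiver looks like this (the body is illustrative):

from celery import signals

@signals.task_prerun.connect
def on_prerun(task_id=None, task=None, args=None, **extra):
    print('starting %s[%s]' % (task.name, task_id))
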
reset_worker_optimizations() diff --git a/awx/lib/site-packages/celery/tests/utilities/test_info.py b/awx/lib/site-packages/celery/tests/utilities/test_info.py deleted file mode 100644 index ef694b05be..0000000000 --- a/awx/lib/site-packages/celery/tests/utilities/test_info.py +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import absolute_import - -from celery import Celery -from celery.utils.text import indent -from celery.tests.utils import Case - -RANDTEXT = """\ -The quick brown -fox jumps -over the -lazy dog\ -""" - -RANDTEXT_RES = """\ - The quick brown - fox jumps - over the - lazy dog\ -""" - -QUEUES = { - 'queue1': { - 'exchange': 'exchange1', - 'exchange_type': 'type1', - 'routing_key': 'bind1', - }, - 'queue2': { - 'exchange': 'exchange2', - 'exchange_type': 'type2', - 'routing_key': 'bind2', - }, -} - - -QUEUE_FORMAT1 = """.> queue1: exchange:exchange1(type1) binding:bind1""" -QUEUE_FORMAT2 = """.> queue2: exchange:exchange2(type2) binding:bind2""" - - -class test_Info(Case): - - def test_textindent(self): - self.assertEqual(indent(RANDTEXT, 4), RANDTEXT_RES) - - def test_format_queues(self): - celery = Celery(set_as_current=False) - celery.amqp.queues = celery.amqp.Queues(QUEUES) - self.assertEqual(sorted(celery.amqp.queues.format().split('\n')), - sorted([QUEUE_FORMAT1, QUEUE_FORMAT2])) diff --git a/awx/lib/site-packages/celery/tests/utilities/test_serialization.py b/awx/lib/site-packages/celery/tests/utilities/test_serialization.py deleted file mode 100644 index e76f39871e..0000000000 --- a/awx/lib/site-packages/celery/tests/utilities/test_serialization.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import absolute_import -from __future__ import with_statement - -import sys - -from celery.tests.utils import Case, mask_modules - - -class test_AAPickle(Case): - - def test_no_cpickle(self): - prev = sys.modules.pop('celery.utils.serialization', None) - try: - with mask_modules('cPickle'): - from celery.utils.serialization import pickle - import pickle as orig_pickle - self.assertIs(pickle.dumps, orig_pickle.dumps) - finally: - sys.modules['celery.utils.serialization'] = prev diff --git a/awx/lib/site-packages/celery/tests/utilities/test_timeutils.py b/awx/lib/site-packages/celery/tests/utilities/test_timeutils.py deleted file mode 100644 index ed49eac76a..0000000000 --- a/awx/lib/site-packages/celery/tests/utilities/test_timeutils.py +++ /dev/null @@ -1,91 +0,0 @@ -from __future__ import absolute_import -from __future__ import with_statement - -from datetime import datetime, timedelta - -from mock import Mock - -from celery.exceptions import ImproperlyConfigured -from celery.utils import timeutils -from celery.utils.timeutils import timezone -from celery.tests.utils import Case - - -class test_timeutils(Case): - - def test_delta_resolution(self): - D = timeutils.delta_resolution - - dt = datetime(2010, 3, 30, 11, 50, 58, 41065) - deltamap = ((timedelta(days=2), datetime(2010, 3, 30, 0, 0)), - (timedelta(hours=2), datetime(2010, 3, 30, 11, 0)), - (timedelta(minutes=2), datetime(2010, 3, 30, 11, 50)), - (timedelta(seconds=2), dt)) - for delta, shoulda in deltamap: - self.assertEqual(D(dt, delta), shoulda) - - def test_timedelta_seconds(self): - deltamap = ((timedelta(seconds=1), 1), - (timedelta(seconds=27), 27), - (timedelta(minutes=3), 3 * 60), - (timedelta(hours=4), 4 * 60 * 60), - (timedelta(days=3), 3 * 86400)) - for delta, seconds in deltamap: - self.assertEqual(timeutils.timedelta_seconds(delta), seconds) - - def 
test_timedelta_seconds_returns_0_on_negative_time(self): - delta = timedelta(days=-2) - self.assertEqual(timeutils.timedelta_seconds(delta), 0) - - def test_humanize_seconds(self): - t = ((4 * 60 * 60 * 24, '4.00 days'), - (1 * 60 * 60 * 24, '1.00 day'), - (4 * 60 * 60, '4.00 hours'), - (1 * 60 * 60, '1.00 hour'), - (4 * 60, '4.00 minutes'), - (1 * 60, '1.00 minute'), - (4, '4.00 seconds'), - (1, '1.00 second'), - (4.3567631221, '4.36 seconds'), - (0, 'now')) - - for seconds, human in t: - self.assertEqual(timeutils.humanize_seconds(seconds), human) - - self.assertEqual(timeutils.humanize_seconds(4, prefix='about '), - 'about 4.00 seconds') - - def test_maybe_iso8601_datetime(self): - now = datetime.now() - self.assertIs(timeutils.maybe_iso8601(now), now) - - def test_maybe_timedelta(self): - D = timeutils.maybe_timedelta - - for i in (30, 30.6): - self.assertEqual(D(i), timedelta(seconds=i)) - - self.assertEqual(D(timedelta(days=2)), timedelta(days=2)) - - def test_remaining_relative(self): - timeutils.remaining(datetime.utcnow(), timedelta(hours=1), - relative=True) - - -class test_timezone(Case): - - def test_get_timezone_with_pytz(self): - prev, timeutils.pytz = timeutils.pytz, Mock() - try: - self.assertTrue(timezone.get_timezone('UTC')) - finally: - timeutils.pytz = prev - - def test_get_timezone_without_pytz(self): - prev, timeutils.pytz = timeutils.pytz, None - try: - self.assertTrue(timezone.get_timezone('UTC')) - with self.assertRaises(ImproperlyConfigured): - timezone.get_timezone('Europe/Oslo') - finally: - timeutils.pytz = prev diff --git a/awx/lib/site-packages/celery/tests/utilities/test_utils.py b/awx/lib/site-packages/celery/tests/utilities/test_utils.py deleted file mode 100644 index 4446d91e5d..0000000000 --- a/awx/lib/site-packages/celery/tests/utilities/test_utils.py +++ /dev/null @@ -1,165 +0,0 @@ -from __future__ import absolute_import -from __future__ import with_statement - -from kombu.utils.functional import promise - -from mock import patch - -from celery import utils -from celery.utils import text -from celery.utils import functional -from celery.utils.functional import mpromise, maybe_list -from celery.utils.threads import bgThread -from celery.tests.utils import Case - - -def double(x): - return x * 2 - - -class test_bgThread_interface(Case): - - def test_body(self): - x = bgThread() - with self.assertRaises(NotImplementedError): - x.body() - - -class test_chunks(Case): - - def test_chunks(self): - - # n == 2 - x = utils.chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) - self.assertListEqual( - list(x), - [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]], - ) - - # n == 3 - x = utils.chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) - self.assertListEqual( - list(x), - [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]], - ) - - # n == 2 (exact) - x = utils.chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 2) - self.assertListEqual( - list(x), - [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], - ) - - -class test_utils(Case): - - def test_is_iterable(self): - for a in 'f', ['f'], ('f', ), {'f': 'f'}: - self.assertTrue(utils.is_iterable(a)) - for b in object(), 1: - self.assertFalse(utils.is_iterable(b)) - - def test_padlist(self): - self.assertListEqual( - functional.padlist(['George', 'Costanza', 'NYC'], 3), - ['George', 'Costanza', 'NYC'], - ) - self.assertListEqual( - functional.padlist(['George', 'Costanza'], 3), - ['George', 'Costanza', None], - ) - self.assertListEqual( - functional.padlist(['George', 'Costanza', 'NYC'], 4, - default='Earth'), - ['George', 'Costanza', 
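The deleted test_timeutils module above pinned humanize_seconds(); its documented expectations, restated as a runnable snippet:

from celery.utils.timeutils import humanize_seconds

assert humanize_seconds(4 * 60 * 60) == '4.00 hours'
assert humanize_seconds(4.3567631221) == '4.36 seconds'
assert humanize_seconds(0) == 'now'
assert humanize_seconds(4, prefix='about ') == 'about 4.00 seconds'
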
'NYC', 'Earth'], - ) - - def test_firstmethod_AttributeError(self): - self.assertIsNone(functional.firstmethod('foo')([object()])) - - def test_firstmethod_promises(self): - - class A(object): - - def __init__(self, value=None): - self.value = value - - def m(self): - return self.value - - self.assertEqual('four', functional.firstmethod('m')([ - A(), A(), A(), A('four'), A('five')])) - self.assertEqual('four', functional.firstmethod('m')([ - A(), A(), A(), promise(lambda: A('four')), A('five')])) - - def test_first(self): - iterations = [0] - - def predicate(value): - iterations[0] += 1 - if value == 5: - return True - return False - - self.assertEqual(5, functional.first(predicate, xrange(10))) - self.assertEqual(iterations[0], 6) - - iterations[0] = 0 - self.assertIsNone(functional.first(predicate, xrange(10, 20))) - self.assertEqual(iterations[0], 10) - - def test_truncate_text(self): - self.assertEqual(text.truncate('ABCDEFGHI', 3), 'ABC...') - self.assertEqual(text.truncate('ABCDEFGHI', 10), 'ABCDEFGHI') - - def test_abbr(self): - self.assertEqual(text.abbr(None, 3), '???') - self.assertEqual(text.abbr('ABCDEFGHI', 6), 'ABC...') - self.assertEqual(text.abbr('ABCDEFGHI', 20), 'ABCDEFGHI') - self.assertEqual(text.abbr('ABCDEFGHI', 6, None), 'ABCDEF') - - def test_abbrtask(self): - self.assertEqual(text.abbrtask(None, 3), '???') - self.assertEqual( - text.abbrtask('feeds.tasks.refresh', 10), - '[.]refresh', - ) - self.assertEqual( - text.abbrtask('feeds.tasks.refresh', 30), - 'feeds.tasks.refresh', - ) - - def test_pretty(self): - self.assertTrue(text.pretty(('a', 'b', 'c'))) - - def test_cached_property(self): - - def fun(obj): - return fun.value - - x = utils.cached_property(fun) - self.assertIs(x.__get__(None), x) - self.assertIs(x.__set__(None, None), x) - self.assertIs(x.__delete__(None), x) - - def test_maybe_list(self): - self.assertEqual(maybe_list(1), [1]) - self.assertEqual(maybe_list([1]), [1]) - self.assertIsNone(maybe_list(None)) - - @patch('warnings.warn') - def test_warn_deprecated(self, warn): - utils.warn_deprecated('Foo') - self.assertTrue(warn.called) - - -class test_mpromise(Case): - - def test_is_memoized(self): - - it = iter(xrange(20, 30)) - p = mpromise(it.next) - self.assertEqual(p(), 20) - self.assertTrue(p.evaluated) - self.assertEqual(p(), 20) - self.assertEqual(repr(p), '20') diff --git a/awx/lib/site-packages/celery/tests/utils/__init__.py b/awx/lib/site-packages/celery/tests/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/celery/tests/utilities/test_datastructures.py b/awx/lib/site-packages/celery/tests/utils/test_datastructures.py similarity index 55% rename from awx/lib/site-packages/celery/tests/utilities/test_datastructures.py rename to awx/lib/site-packages/celery/tests/utils/test_datastructures.py index 873be505d7..a149336f10 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_datastructures.py +++ b/awx/lib/site-packages/celery/tests/utils/test_datastructures.py @@ -1,17 +1,21 @@ from __future__ import absolute_import -from __future__ import with_statement + +import pickle +import sys + +from billiard.einfo import ExceptionInfo +from time import time from celery.datastructures import ( - ExceptionInfo, - LRUCache, LimitedSet, AttributeDict, DictAttribute, ConfigurationView, DependencyGraph, ) -from celery.utils.compat import THREAD_TIMEOUT_MAX -from celery.tests.utils import Case, WhateverIO +from celery.five import items + +from celery.tests.case import Case, Mock, WhateverIO, 
SkipTest, patch class Object(object): @@ -20,7 +24,7 @@ class Object(object): class test_DictAttribute(Case): - def test_get_set(self): + def test_get_set_keys_values_items(self): x = DictAttribute(Object()) x['foo'] = 'The quick brown fox' self.assertEqual(x['foo'], 'The quick brown fox') @@ -29,6 +33,14 @@ class test_DictAttribute(Case): self.assertIsNone(x.get('bar')) with self.assertRaises(KeyError): x['bar'] + x.foo = 'The quick yellow fox' + self.assertEqual(x['foo'], 'The quick yellow fox') + self.assertIn( + ('foo', 'The quick yellow fox'), + list(x.items()), + ) + self.assertIn('foo', list(x.keys())) + self.assertIn('The quick yellow fox', list(x.values())) def test_setdefault(self): x = DictAttribute(Object()) @@ -86,11 +98,45 @@ class test_ConfigurationView(Case): expected = {'changed_key': 1, 'default_key': 1, 'both': 2} - self.assertDictEqual(dict(self.view.items()), expected) + self.assertDictEqual(dict(items(self.view)), expected) self.assertItemsEqual(list(iter(self.view)), - expected.keys()) - self.assertItemsEqual(self.view.keys(), expected.keys()) - self.assertItemsEqual(self.view.values(), expected.values()) + list(expected.keys())) + self.assertItemsEqual(list(self.view.keys()), list(expected.keys())) + self.assertItemsEqual( + list(self.view.values()), + list(expected.values()), + ) + self.assertIn('changed_key', list(self.view.keys())) + self.assertIn(2, list(self.view.values())) + self.assertIn(('both', 2), list(self.view.items())) + + def test_add_defaults_dict(self): + defaults = {'foo': 10} + self.view.add_defaults(defaults) + self.assertEqual(self.view.foo, 10) + + def test_add_defaults_object(self): + defaults = Object() + defaults.foo = 10 + self.view.add_defaults(defaults) + self.assertEqual(self.view.foo, 10) + + def test_clear(self): + self.view.clear() + self.assertEqual(self.view.both, 1) + self.assertNotIn('changed_key', self.view) + + def test_bool(self): + self.assertTrue(bool(self.view)) + self.view._order[:] = [] + self.assertFalse(bool(self.view)) + + def test_len(self): + self.assertEqual(len(self.view), 3) + self.view.KEY = 33 + self.assertEqual(len(self.view), 4) + self.view.clear() + self.assertEqual(len(self.view), 2) def test_isa_mapping(self): from collections import Mapping @@ -122,7 +168,13 @@ class test_ExceptionInfo(Case): class test_LimitedSet(Case): + def setUp(self): + if sys.platform == 'win32': + raise SkipTest('Not working in Windows') + def test_add(self): + if sys.platform == 'win32': + raise SkipTest('Not working properly on Windows') s = LimitedSet(maxlen=2) s.add('foo') s.add('bar') @@ -133,14 +185,62 @@ class test_LimitedSet(Case): self.assertIn(n, s) self.assertNotIn('foo', s) - def test_iter(self): + def test_purge(self): + s = LimitedSet(maxlen=None) + [s.add(i) for i in range(10)] + s.maxlen = 2 + s.purge(1) + self.assertEqual(len(s), 9) + s.purge(None) + self.assertEqual(len(s), 2) + + # expired + s = LimitedSet(maxlen=None, expires=1) + [s.add(i) for i in range(10)] + s.maxlen = 2 + s.purge(1, now=lambda: time() + 100) + self.assertEqual(len(s), 9) + s.purge(None, now=lambda: time() + 100) + self.assertEqual(len(s), 2) + + # not expired + s = LimitedSet(maxlen=None, expires=1) + [s.add(i) for i in range(10)] + s.maxlen = 2 + s.purge(1, now=lambda: time() - 100) + self.assertEqual(len(s), 10) + s.purge(None, now=lambda: time() - 100) + self.assertEqual(len(s), 10) + + s = LimitedSet(maxlen=None) + [s.add(i) for i in range(10)] + s.maxlen = 2 + with patch('celery.datastructures.heappop') as hp: + hp.side_effect = 
IndexError() + s.purge() + hp.assert_called_with(s._heap) + with patch('celery.datastructures.heappop') as hp: + s._data = dict((i * 2, i * 2) for i in range(10)) + s.purge() + self.assertEqual(hp.call_count, 10) + + def test_pickleable(self): s = LimitedSet(maxlen=2) - items = 'foo', 'bar' + s.add('foo') + s.add('bar') + self.assertEqual(pickle.loads(pickle.dumps(s)), s) + + def test_iter(self): + raise SkipTest('Not working on Windows') + s = LimitedSet(maxlen=3) + items = ['foo', 'bar', 'baz', 'xaz'] for item in items: s.add(item) l = list(iter(s)) - for item in items: + for item in items[1:]: self.assertIn(item, l) + self.assertNotIn('foo', l) + self.assertListEqual(l, items[1:], 'order by insertion time') def test_repr(self): s = LimitedSet(maxlen=2) @@ -149,6 +249,13 @@ class test_LimitedSet(Case): s.add(item) self.assertIn('LimitedSet(', repr(s)) + def test_discard(self): + s = LimitedSet(maxlen=2) + s.add('foo') + s.discard('foo') + self.assertNotIn('foo', s) + s.discard('foo') + def test_clear(self): s = LimitedSet(maxlen=2) s.add('foo') @@ -178,92 +285,6 @@ class test_LimitedSet(Case): self.assertIsInstance(s.as_dict(), dict) -class test_LRUCache(Case): - - def test_expires(self): - limit = 100 - x = LRUCache(limit=limit) - slots = list(xrange(limit * 2)) - for i in slots: - x[i] = i - self.assertListEqual(x.keys(), list(slots[limit:])) - - def test_update_expires(self): - limit = 100 - x = LRUCache(limit=limit) - slots = list(xrange(limit * 2)) - for i in slots: - x.update({i: i}) - - self.assertListEqual(list(x.keys()), list(slots[limit:])) - - def test_least_recently_used(self): - x = LRUCache(3) - - x[1], x[2], x[3] = 1, 2, 3 - self.assertEqual(x.keys(), [1, 2, 3]) - - x[4], x[5] = 4, 5 - self.assertEqual(x.keys(), [3, 4, 5]) - - # access 3, which makes it the last used key. 
- x[3] - x[6] = 6 - self.assertEqual(x.keys(), [5, 3, 6]) - - x[7] = 7 - self.assertEqual(x.keys(), [3, 6, 7]) - - def assertSafeIter(self, method, interval=0.01, size=10000): - from threading import Thread, Event - from time import sleep - x = LRUCache(size) - x.update(zip(xrange(size), xrange(size))) - - class Burglar(Thread): - - def __init__(self, cache): - self.cache = cache - self._is_shutdown = Event() - self._is_stopped = Event() - Thread.__init__(self) - - def run(self): - while not self._is_shutdown.isSet(): - try: - self.cache.data.popitem(last=False) - except KeyError: - break - self._is_stopped.set() - - def stop(self): - self._is_shutdown.set() - self._is_stopped.wait() - self.join(THREAD_TIMEOUT_MAX) - - burglar = Burglar(x) - burglar.start() - try: - for _ in getattr(x, method)(): - sleep(0.0001) - finally: - burglar.stop() - - def test_safe_to_remove_while_iteritems(self): - self.assertSafeIter('iteritems') - - def test_safe_to_remove_while_keys(self): - self.assertSafeIter('keys') - - def test_safe_to_remove_while_itervalues(self): - self.assertSafeIter('itervalues') - - def test_items(self): - c = LRUCache() - c.update(a=1, b=2, c=3) - self.assertTrue(c.items()) - - class test_AttributeDict(Case): def test_getattr__setattr(self): @@ -298,15 +319,38 @@ class test_DependencyGraph(Case): self.assertLess(order.index('A'), order.index('C')) def test_edges(self): - self.assertListEqual(list(self.graph1().edges()), - ['C', 'D']) + self.assertItemsEqual( + list(self.graph1().edges()), + ['C', 'D'], + ) + + def test_connect(self): + x, y = self.graph1(), self.graph1() + x.connect(y) + + def test_valency_of_when_missing(self): + x = self.graph1() + self.assertEqual(x.valency_of('foobarbaz'), 0) + + def test_format(self): + x = self.graph1() + x.formatter = Mock() + obj = Mock() + self.assertTrue(x.format(obj)) + x.formatter.assert_called_with(obj) + x.formatter = None + self.assertIs(x.format(obj), obj) def test_items(self): self.assertDictEqual( - dict(self.graph1().items()), + dict(items(self.graph1())), {'A': [], 'B': [], 'C': ['A'], 'D': ['C', 'B']}, ) + def test_repr_node(self): + x = self.graph1() + self.assertTrue(x.repr_node('fasdswewqewq')) + def test_to_dot(self): s = WhateverIO() self.graph1().to_dot(s) diff --git a/awx/lib/site-packages/celery/tests/utilities/test_dispatcher.py b/awx/lib/site-packages/celery/tests/utils/test_dispatcher.py similarity index 92% rename from awx/lib/site-packages/celery/tests/utilities/test_dispatcher.py rename to awx/lib/site-packages/celery/tests/utils/test_dispatcher.py index 159351979b..72a36f3b33 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_dispatcher.py +++ b/awx/lib/site-packages/celery/tests/utils/test_dispatcher.py @@ -6,7 +6,7 @@ import sys import time from celery.utils.dispatch import Signal -from celery.tests.utils import Case +from celery.tests.case import Case if sys.platform.startswith('java'): @@ -55,7 +55,7 @@ class DispatcherTests(Case): # force cleanup just in case signal.receivers = [] - def testExact(self): + def test_exact(self): a_signal.connect(receiver_1_arg, sender=self) expected = [(receiver_1_arg, 'test')] result = a_signal.send(sender=self, val='test') @@ -63,7 +63,7 @@ class DispatcherTests(Case): a_signal.disconnect(receiver_1_arg, sender=self) self._testIsClean(a_signal) - def testIgnoredSender(self): + def test_ignored_sender(self): a_signal.connect(receiver_1_arg) expected = [(receiver_1_arg, 'test')] result = a_signal.send(sender=self, val='test') @@ -71,7 +71,7 @@ class 
DispatcherTests(Case): a_signal.disconnect(receiver_1_arg) self._testIsClean(a_signal) - def testGarbageCollected(self): + def test_garbage_collected(self): a = Callable() a_signal.connect(a.a, sender=self) expected = [] @@ -81,7 +81,7 @@ class DispatcherTests(Case): self.assertEqual(result, expected) self._testIsClean(a_signal) - def testMultipleRegistration(self): + def test_multiple_registration(self): a = Callable() a_signal.connect(a) a_signal.connect(a) @@ -97,7 +97,7 @@ class DispatcherTests(Case): garbage_collect() self._testIsClean(a_signal) - def testUidRegistration(self): + def test_uid_registration(self): def uid_based_receiver_1(**kwargs): pass @@ -111,8 +111,7 @@ class DispatcherTests(Case): a_signal.disconnect(dispatch_uid='uid') self._testIsClean(a_signal) - def testRobust(self): - """Test the sendRobust function""" + def test_robust(self): def fails(val, **kwargs): raise ValueError('this') @@ -125,7 +124,7 @@ a_signal.disconnect(fails) self._testIsClean(a_signal) - def testDisconnection(self): + def test_disconnection(self): receiver_1 = Callable() receiver_2 = Callable() receiver_3 = Callable() diff --git a/awx/lib/site-packages/celery/tests/utilities/test_encoding.py b/awx/lib/site-packages/celery/tests/utils/test_encoding.py similarity index 77% rename from awx/lib/site-packages/celery/tests/utilities/test_encoding.py rename to awx/lib/site-packages/celery/tests/utils/test_encoding.py index d9885b857f..bdd568982d 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_encoding.py +++ b/awx/lib/site-packages/celery/tests/utils/test_encoding.py @@ -1,7 +1,7 @@ -from __future__ import absolute_import +from __future__ import absolute_import, unicode_literals from celery.utils import encoding -from celery.tests.utils import Case +from celery.tests.case import Case class test_encoding(Case): @@ -9,7 +9,6 @@ class test_encoding(Case): def test_safe_str(self): self.assertTrue(encoding.safe_str(object())) self.assertTrue(encoding.safe_str('foo')) - self.assertTrue(encoding.safe_str(u'foo')) def test_safe_repr(self): self.assertTrue(encoding.safe_repr(object())) diff --git a/awx/lib/site-packages/celery/tests/utils/test_functional.py b/awx/lib/site-packages/celery/tests/utils/test_functional.py new file mode 100644 index 0000000000..b0b65822b5 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/utils/test_functional.py @@ -0,0 +1,180 @@ +from __future__ import absolute_import + +import pickle + +from kombu.utils.functional import lazy + +from celery.five import THREAD_TIMEOUT_MAX, items, range, nextfun +from celery.utils.functional import ( + LRUCache, + firstmethod, + first, + mlazy, + padlist, + maybe_list, +) + +from celery.tests.case import Case + + +class test_LRUCache(Case): + + def test_expires(self): + limit = 100 + x = LRUCache(limit=limit) + slots = list(range(limit * 2)) + for i in slots: + x[i] = i + self.assertListEqual(list(x.keys()), list(slots[limit:])) + self.assertTrue(x.items()) + self.assertTrue(x.values()) + + def test_is_pickleable(self): + x = LRUCache(limit=10) + x.update(luke=1, leia=2) + y = pickle.loads(pickle.dumps(x)) + self.assertEqual(x.limit, y.limit) + self.assertEqual(y, x) + + def test_update_expires(self): + limit = 100 + x = LRUCache(limit=limit) + slots = list(range(limit * 2)) + for i in slots: + x.update({i: i}) + + self.assertListEqual(list(x.keys()), list(slots[limit:])) + + def test_least_recently_used(self): + x = LRUCache(3) + + x[1], x[2], x[3] = 1, 2, 3 + self.assertEqual(list(x.keys()), [1, 2, 
3]) + + x[4], x[5] = 4, 5 + self.assertEqual(list(x.keys()), [3, 4, 5]) + + # access 3, which makes it the last used key. + x[3] + x[6] = 6 + self.assertEqual(list(x.keys()), [5, 3, 6]) + + x[7] = 7 + self.assertEqual(list(x.keys()), [3, 6, 7]) + + def assertSafeIter(self, method, interval=0.01, size=10000): + from threading import Thread, Event + from time import sleep + x = LRUCache(size) + x.update(zip(range(size), range(size))) + + class Burglar(Thread): + + def __init__(self, cache): + self.cache = cache + self._is_shutdown = Event() + self._is_stopped = Event() + Thread.__init__(self) + + def run(self): + while not self._is_shutdown.isSet(): + try: + self.cache.data.popitem(last=False) + except KeyError: + break + self._is_stopped.set() + + def stop(self): + self._is_shutdown.set() + self._is_stopped.wait() + self.join(THREAD_TIMEOUT_MAX) + + burglar = Burglar(x) + burglar.start() + try: + for _ in getattr(x, method)(): + sleep(0.0001) + finally: + burglar.stop() + + def test_safe_to_remove_while_iteritems(self): + self.assertSafeIter('iteritems') + + def test_safe_to_remove_while_keys(self): + self.assertSafeIter('keys') + + def test_safe_to_remove_while_itervalues(self): + self.assertSafeIter('itervalues') + + def test_items(self): + c = LRUCache() + c.update(a=1, b=2, c=3) + self.assertTrue(list(items(c))) + + +class test_utils(Case): + + def test_padlist(self): + self.assertListEqual( + padlist(['George', 'Costanza', 'NYC'], 3), + ['George', 'Costanza', 'NYC'], + ) + self.assertListEqual( + padlist(['George', 'Costanza'], 3), + ['George', 'Costanza', None], + ) + self.assertListEqual( + padlist(['George', 'Costanza', 'NYC'], 4, default='Earth'), + ['George', 'Costanza', 'NYC', 'Earth'], + ) + + def test_firstmethod_AttributeError(self): + self.assertIsNone(firstmethod('foo')([object()])) + + def test_firstmethod_handles_lazy(self): + + class A(object): + + def __init__(self, value=None): + self.value = value + + def m(self): + return self.value + + self.assertEqual('four', firstmethod('m')([ + A(), A(), A(), A('four'), A('five')])) + self.assertEqual('four', firstmethod('m')([ + A(), A(), A(), lazy(lambda: A('four')), A('five')])) + + def test_first(self): + iterations = [0] + + def predicate(value): + iterations[0] += 1 + if value == 5: + return True + return False + + self.assertEqual(5, first(predicate, range(10))) + self.assertEqual(iterations[0], 6) + + iterations[0] = 0 + self.assertIsNone(first(predicate, range(10, 20))) + self.assertEqual(iterations[0], 10) + + def test_maybe_list(self): + self.assertEqual(maybe_list(1), [1]) + self.assertEqual(maybe_list([1]), [1]) + self.assertIsNone(maybe_list(None)) + + +class test_mlazy(Case): + + def test_is_memoized(self): + + it = iter(range(20, 30)) + p = mlazy(nextfun(it)) + self.assertEqual(p(), 20) + self.assertTrue(p.evaluated) + self.assertEqual(p(), 20) + self.assertEqual(repr(p), '20') diff --git a/awx/lib/site-packages/celery/tests/utilities/test_imports.py b/awx/lib/site-packages/celery/tests/utils/test_imports.py similarity index 61% rename from awx/lib/site-packages/celery/tests/utilities/test_imports.py rename to awx/lib/site-packages/celery/tests/utils/test_imports.py index cc772ffec4..e7d88bc098 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_imports.py +++ b/awx/lib/site-packages/celery/tests/utils/test_imports.py @@ -1,18 +1,14 @@ from __future__ import absolute_import -from __future__ import with_statement - -from mock import Mock, patch from celery.utils.imports import ( qualname, - 
symbol_by_name, reload_from_cwd, module_file, find_module, NotAPackage, ) -from celery.tests.utils import Case +from celery.tests.case import Case, Mock, patch class test_import_utils(Case): @@ -29,25 +25,6 @@ class test_import_utils(Case): self.assertEqual(qualname(Class), 'quick.brown.Fox') self.assertEqual(qualname(Class()), 'quick.brown.Fox') - def test_symbol_by_name__instance_returns_instance(self): - instance = object() - self.assertIs(symbol_by_name(instance), instance) - - def test_symbol_by_name_returns_default(self): - default = object() - self.assertIs( - symbol_by_name('xyz.ryx.qedoa.weq:foz', default=default), - default, - ) - - def test_symbol_by_name_package(self): - from celery.worker import WorkController - self.assertIs( - symbol_by_name('.worker:WorkController', package='celery'), - WorkController, - ) - self.assertTrue(symbol_by_name(':group', package='celery')) - @patch('celery.utils.imports.reload') def test_reload_from_cwd(self, reload): reload_from_cwd('foo') diff --git a/awx/lib/site-packages/celery/tests/utilities/test_local.py b/awx/lib/site-packages/celery/tests/utils/test_local.py similarity index 80% rename from awx/lib/site-packages/celery/tests/utilities/test_local.py rename to awx/lib/site-packages/celery/tests/utils/test_local.py index b1d2ea6768..9778922473 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_local.py +++ b/awx/lib/site-packages/celery/tests/utils/test_local.py @@ -1,13 +1,15 @@ -from __future__ import absolute_import -from __future__ import with_statement +from __future__ import absolute_import, unicode_literals import sys -from nose import SkipTest - -from celery.local import Proxy, PromiseProxy, maybe_evaluate, try_import - -from celery.tests.utils import Case +from celery.five import string, long_t +from celery.local import ( + Proxy, + PromiseProxy, + maybe_evaluate, + try_import, +) +from celery.tests.case import Case, Mock class test_try_import(Case): @@ -44,13 +46,20 @@ class test_Proxy(Case): self.assertEqual(x.__class__, type(real)) self.assertEqual(x.__dict__, real.__dict__) self.assertEqual(repr(x), repr(real)) + self.assertTrue(x.__module__) - def test_nonzero(self): + def test_get_current_local(self): + x = Proxy(lambda: 10) + object.__setattr__(x, '_Proxy_local', Mock()) + self.assertTrue(x._get_current_object()) + + def test_bool(self): class X(object): - def __nonzero__(self): + def __bool__(self): return False + __nonzero__ = __bool__ x = Proxy(lambda: X()) self.assertFalse(x) @@ -69,19 +78,19 @@ class test_Proxy(Case): class X(object): def __unicode__(self): - return u'UNICODE' + return 'UNICODE' + __str__ = __unicode__ def __repr__(self): return 'REPR' x = Proxy(lambda: X()) - self.assertEqual(unicode(x), u'UNICODE') + self.assertEqual(string(x), 'UNICODE') del(X.__unicode__) - self.assertEqual(unicode(x), 'REPR') + del(X.__str__) + self.assertEqual(string(x), 'REPR') def test_dir(self): - if sys.version_info < (2, 6): - raise SkipTest('Not relevant for Py2.5') class X(object): @@ -99,8 +108,6 @@ class test_Proxy(Case): self.assertListEqual(dir(y), []) def test_getsetdel_attr(self): - if sys.version_info < (2, 6): - raise SkipTest('Not relevant for Py2.5') class X(object): a = 1 @@ -150,6 +157,41 @@ class test_Proxy(Case): self.assertIn(10, x) self.assertEqual(len(x), 3) self.assertTrue(iter(x)) + x[0:2] = [1, 2] + del(x[0:2]) + self.assertTrue(str(x)) + if sys.version_info[0] < 3: + self.assertEqual(x.__cmp__(object()), -1) + + def test_complex_cast(self): + + class O(object): + + def __complex__(self): + 
return 10.333 + + o = Proxy(O) + self.assertEqual(o.__complex__(), 10.333) + + def test_index(self): + + class O(object): + + def __index__(self): + return 1 + + o = Proxy(O) + self.assertEqual(o.__index__(), 1) + + def test_coerce(self): + + class O(object): + + def __coerce__(self, other): + return self, other + + o = Proxy(O) + self.assertTrue(o.__coerce__(3)) def test_int(self): self.assertEqual(Proxy(lambda: 10) + 1, Proxy(lambda: 11)) @@ -172,6 +214,9 @@ class test_Proxy(Case): self.assertTrue(Proxy(lambda: 10) <= Proxy(lambda: 10)) self.assertTrue(Proxy(lambda: 10) == Proxy(lambda: 10)) self.assertTrue(Proxy(lambda: 20) != Proxy(lambda: 10)) + self.assertTrue(Proxy(lambda: 100).__divmod__(30)) + self.assertTrue(Proxy(lambda: 100).__truediv__(30)) + self.assertTrue(abs(Proxy(lambda: -100))) x = Proxy(lambda: 10) x -= 1 @@ -213,7 +258,7 @@ class test_Proxy(Case): x = Proxy(lambda: 10) self.assertEqual(type(x.__float__()), float) self.assertEqual(type(x.__int__()), int) - self.assertEqual(type(x.__long__()), long) + self.assertEqual(type(x.__long__()), long_t) self.assertTrue(hex(x)) self.assertTrue(oct(x)) @@ -283,7 +328,9 @@ class test_PromiseProxy(Case): def test_maybe_evaluate(self): x = PromiseProxy(lambda: 30) + self.assertFalse(x.__evaluated__()) self.assertEqual(maybe_evaluate(x), 30) self.assertEqual(maybe_evaluate(x), 30) self.assertEqual(maybe_evaluate(30), 30) + self.assertTrue(x.__evaluated__()) diff --git a/awx/lib/site-packages/celery/tests/utilities/test_mail.py b/awx/lib/site-packages/celery/tests/utils/test_mail.py similarity index 66% rename from awx/lib/site-packages/celery/tests/utilities/test_mail.py rename to awx/lib/site-packages/celery/tests/utils/test_mail.py index a47a3d8600..4006fb0b5e 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_mail.py +++ b/awx/lib/site-packages/celery/tests/utils/test_mail.py @@ -1,10 +1,8 @@ from __future__ import absolute_import -from mock import Mock, patch +from celery.utils.mail import Message, Mailer, SSLError -from celery.utils.mail import Message, Mailer - -from celery.tests.utils import Case +from celery.tests.case import Case, Mock, patch msg = Message(to='george@vandelay.com', sender='elaine@pendant.com', @@ -22,25 +20,10 @@ class test_Message(Case): class test_Mailer(Case): - def test_send_supports_timeout(self): + def test_send_wrapper(self): mailer = Mailer() - mailer.supports_timeout = True mailer._send = Mock() mailer.send(msg) - mailer._send.assert_called_with(msg, timeout=2) - - @patch('socket.setdefaulttimeout') - @patch('socket.getdefaulttimeout') - def test_send_no_timeout(self, get, set): - mailer = Mailer() - mailer.supports_timeout = False - mailer._send = Mock() - get.return_value = 10 - mailer.send(msg) - get.assert_called_with() - sets = set.call_args_list - self.assertEqual(sets[0][0], (2, )) - self.assertEqual(sets[1][0], (10, )) mailer._send.assert_called_with(msg) @patch('smtplib.SMTP_SSL', create=True) @@ -64,3 +47,7 @@ class test_Mailer(Case): mailer._send(msg) client.sendmail.assert_called_with(msg.sender, msg.to, str(msg)) + + client.quit.side_effect = SSLError() + mailer._send(msg) + client.close.assert_called_with() diff --git a/awx/lib/site-packages/celery/tests/utilities/test_pickle.py b/awx/lib/site-packages/celery/tests/utils/test_pickle.py similarity index 92% rename from awx/lib/site-packages/celery/tests/utilities/test_pickle.py rename to awx/lib/site-packages/celery/tests/utils/test_pickle.py index 580286fc46..6b65bb3c55 100644 ---
a/awx/lib/site-packages/celery/tests/utilities/test_pickle.py +++ b/awx/lib/site-packages/celery/tests/utils/test_pickle.py @@ -1,7 +1,7 @@ from __future__ import absolute_import from celery.utils.serialization import pickle -from celery.tests.utils import Case +from celery.tests.case import Case class RegularException(Exception): @@ -21,7 +21,7 @@ class test_Pickle(Case): exc = None try: raise RegularException('RegularException raised') - except RegularException, exc_: + except RegularException as exc_: exc = exc_ pickled = pickle.dumps({'exception': exc}) @@ -38,7 +38,7 @@ class test_Pickle(Case): raise ArgOverrideException( 'ArgOverrideException raised', status_code=100, ) - except ArgOverrideException, exc_: + except ArgOverrideException as exc_: exc = exc_ pickled = pickle.dumps({'exception': exc}) diff --git a/awx/lib/site-packages/celery/tests/utilities/test_platforms.py b/awx/lib/site-packages/celery/tests/utils/test_platforms.py similarity index 82% rename from awx/lib/site-packages/celery/tests/utilities/test_platforms.py rename to awx/lib/site-packages/celery/tests/utils/test_platforms.py index b7f22deddf..587e2c0351 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_platforms.py +++ b/awx/lib/site-packages/celery/tests/utils/test_platforms.py @@ -1,18 +1,15 @@ from __future__ import absolute_import -from __future__ import with_statement import errno import os -import resource +import sys import signal -from mock import Mock, patch - -from celery import current_app +from celery import _find_option_with_arg from celery import platforms +from celery.five import open_fqdn from celery.platforms import ( get_fdmax, - shellsplit, ignore_errno, set_process_title, signals, @@ -28,10 +25,49 @@ from celery.platforms import ( Pidfile, LockFailed, setgroups, - _setgroups_hack + _setgroups_hack, + close_open_fds, ) -from celery.tests.utils import Case, WhateverIO, override_stdouts, mock_open +try: + import resource +except ImportError: # pragma: no cover + resource = None # noqa + +from celery.tests.case import ( + Case, WhateverIO, Mock, SkipTest, + call, override_stdouts, mock_open, patch, +) + + +class test_find_option_with_arg(Case): + + def test_long_opt(self): + self.assertEqual( + _find_option_with_arg(['--foo=bar'], long_opts=['--foo']), + 'bar' + ) + + def test_short_opt(self): + self.assertEqual( + _find_option_with_arg(['-f', 'bar'], short_opts=['-f']), + 'bar' + ) + + +class test_close_open_fds(Case): + + def test_closes(self): + with patch('os.close') as _close: + with patch('os.closerange', create=True) as closerange: + with patch('celery.platforms.get_fdmax') as fdmax: + fdmax.return_value = 3 + close_open_fds() + if not closerange.called: + _close.assert_has_calls([call(2), call(1), call(0)]) + _close.side_effect = OSError() + _close.side_effect.errno = errno.EBADF + close_open_fds() class test_ignore_errno(Case): @@ -50,15 +86,6 @@ class test_ignore_errno(Case): raise exc -class test_shellsplit(Case): - - def test_split(self): - self.assertEqual( - shellsplit("the 'quick' brown fox"), - ['the', 'quick', 'brown', 'fox'], - ) - - class test_set_process_title(Case): def when_no_setps(self): @@ -80,6 +107,19 @@ class test_Signals(Case): self.assertTrue(signals.supported('INT')) self.assertFalse(signals.supported('SIGIMAGINARY')) + def test_reset_alarm(self): + if sys.platform == 'win32': + raise SkipTest('signal.alarm not available on Windows') + with patch('signal.alarm') as _alarm: + signals.reset_alarm() + _alarm.assert_called_with(0) + + def test_arm_alarm(self): 
+ if hasattr(signal, 'setitimer'): + with patch('signal.setitimer', create=True) as seti: + signals.arm_alarm(30) + self.assertTrue(seti.called) + def test_signum(self): self.assertEqual(signals.signum(13), 13) self.assertEqual(signals.signum('INT'), signal.SIGINT) @@ -107,20 +147,24 @@ class test_Signals(Case): signals['INT'] = lambda *a: a -if not current_app.IS_WINDOWS: +if not platforms.IS_WINDOWS: class test_get_fdmax(Case): @patch('resource.getrlimit') def test_when_infinity(self, getrlimit): - getrlimit.return_value = [None, resource.RLIM_INFINITY] - default = object() - self.assertIs(get_fdmax(default), default) + with patch('os.sysconf') as sysconfig: + sysconfig.side_effect = KeyError() + getrlimit.return_value = [None, resource.RLIM_INFINITY] + default = object() + self.assertIs(get_fdmax(default), default) @patch('resource.getrlimit') def test_when_actual(self, getrlimit): - getrlimit.return_value = [None, 13] - self.assertEqual(get_fdmax(None), 13) + with patch('os.sysconf') as sysconfig: + sysconfig.side_effect = KeyError() + getrlimit.return_value = [None, 13] + self.assertEqual(get_fdmax(None), 13) class test_maybe_drop_privileges(Case): @@ -134,6 +178,11 @@ if not current_app.IS_WINDOWS: class pw_struct(object): pw_gid = 50001 + + def raise_on_second_call(*args, **kwargs): + setuid.side_effect = OSError() + setuid.side_effect.errno = errno.EPERM + setuid.side_effect = raise_on_second_call getpwuid.return_value = pw_struct() parse_uid.return_value = 5001 maybe_drop_privileges(uid='user') @@ -141,7 +190,7 @@ if not current_app.IS_WINDOWS: getpwuid.assert_called_with(5001) setgid.assert_called_with(50001) initgroups.assert_called_with(5001, 50001) - setuid.assert_called_with(5001) + setuid.assert_has_calls([call(5001), call(0)]) @patch('celery.platforms.parse_uid') @patch('celery.platforms.parse_gid') @@ -150,6 +199,11 @@ if not current_app.IS_WINDOWS: @patch('celery.platforms.initgroups') def test_with_guid(self, initgroups, setuid, setgid, parse_gid, parse_uid): + + def raise_on_second_call(*args, **kwargs): + setuid.side_effect = OSError() + setuid.side_effect.errno = errno.EPERM + setuid.side_effect = raise_on_second_call parse_uid.return_value = 5001 parse_gid.return_value = 50001 maybe_drop_privileges(uid='user', gid='group') @@ -157,7 +211,15 @@ if not current_app.IS_WINDOWS: parse_gid.assert_called_with('group') setgid.assert_called_with(50001) initgroups.assert_called_with(5001, 50001) - setuid.assert_called_with(5001) + setuid.assert_has_calls([call(5001), call(0)]) + + setuid.side_effect = None + with self.assertRaises(RuntimeError): + maybe_drop_privileges(uid='user', gid='group') + setuid.side_effect = OSError() + setuid.side_effect.errno = errno.EINVAL + with self.assertRaises(OSError): + maybe_drop_privileges(uid='user', gid='group') @patch('celery.platforms.setuid') @patch('celery.platforms.setgid') @@ -273,7 +335,7 @@ if not current_app.IS_WINDOWS: @patch('celery.platforms.signals') @patch('celery.platforms.maybe_drop_privileges') @patch('os.geteuid') - @patch('__builtin__.open') + @patch(open_fqdn) def test_default(self, open, geteuid, maybe_drop, signals, pidlock): geteuid.return_value = 0 @@ -305,11 +367,13 @@ if not current_app.IS_WINDOWS: @patch('os.chdir') @patch('os.umask') @patch('os.close') + @patch('os.closerange') @patch('os.open') @patch('os.dup2') - def test_open(self, dup2, open, close, umask, - chdir, _exit, setsid, fork): + def test_open(self, dup2, open, close, closer, umask, chdir, + _exit, setsid, fork): x = 
DaemonContext(workdir='/opt/workdir') + x.stdfds = [0, 1, 2] fork.return_value = 0 with x: @@ -327,17 +391,24 @@ if not current_app.IS_WINDOWS: fork.reset_mock() fork.return_value = 1 x = DaemonContext(workdir='/opt/workdir') + x.stdfds = [0, 1, 2] with x: pass self.assertEqual(fork.call_count, 1) _exit.assert_called_with(0) x = DaemonContext(workdir='/opt/workdir', fake=True) + x.stdfds = [0, 1, 2] x._detach = Mock() with x: pass self.assertFalse(x._detach.called) + x.after_chdir = Mock() + with x: + pass + x.after_chdir.assert_called_with() + class test_Pidfile(Case): @patch('celery.platforms.Pidfile') @@ -503,7 +574,7 @@ if not current_app.IS_WINDOWS: @patch('os.getpid') @patch('os.open') @patch('os.fdopen') - @patch('__builtin__.open') + @patch(open_fqdn) def test_write_pid(self, open_, fdopen, osopen, getpid, fsync): getpid.return_value = 1816 osopen.return_value = 13 @@ -529,7 +600,7 @@ if not current_app.IS_WINDOWS: @patch('os.getpid') @patch('os.open') @patch('os.fdopen') - @patch('__builtin__.open') + @patch(open_fqdn) def test_write_reread_fails(self, open_, fdopen, osopen, getpid, fsync): getpid.return_value = 1816 @@ -555,11 +626,11 @@ if not current_app.IS_WINDOWS: return raise ValueError() setgroups.side_effect = on_setgroups - _setgroups_hack(range(400)) + _setgroups_hack(list(range(400))) setgroups.side_effect = ValueError() with self.assertRaises(ValueError): - _setgroups_hack(range(400)) + _setgroups_hack(list(range(400))) @patch('os.setgroups', create=True) def test_setgroups_hack_OSError(self, setgroups): @@ -573,31 +644,31 @@ if not current_app.IS_WINDOWS: raise exc setgroups.side_effect = on_setgroups - _setgroups_hack(range(400)) + _setgroups_hack(list(range(400))) setgroups.side_effect = exc with self.assertRaises(OSError): - _setgroups_hack(range(400)) + _setgroups_hack(list(range(400))) exc2 = OSError() exc.errno = errno.ESRCH setgroups.side_effect = exc2 with self.assertRaises(OSError): - _setgroups_hack(range(400)) + _setgroups_hack(list(range(400))) @patch('os.sysconf') @patch('celery.platforms._setgroups_hack') def test_setgroups(self, hack, sysconf): sysconf.return_value = 100 - setgroups(range(400)) - hack.assert_called_with(range(100)) + setgroups(list(range(400))) + hack.assert_called_with(list(range(100))) @patch('os.sysconf') @patch('celery.platforms._setgroups_hack') def test_setgroups_sysconf_raises(self, hack, sysconf): sysconf.side_effect = ValueError() - setgroups(range(400)) - hack.assert_called_with(range(400)) + setgroups(list(range(400))) + hack.assert_called_with(list(range(400))) @patch('os.getgroups') @patch('os.sysconf') @@ -608,7 +679,7 @@ if not current_app.IS_WINDOWS: esrch.errno = errno.ESRCH hack.side_effect = esrch with self.assertRaises(OSError): - setgroups(range(400)) + setgroups(list(range(400))) @patch('os.getgroups') @patch('os.sysconf') @@ -618,11 +689,11 @@ if not current_app.IS_WINDOWS: eperm = OSError() eperm.errno = errno.EPERM hack.side_effect = eperm - getgroups.return_value = range(400) - setgroups(range(400)) + getgroups.return_value = list(range(400)) + setgroups(list(range(400))) getgroups.assert_called_with() getgroups.return_value = [1000] with self.assertRaises(OSError): - setgroups(range(400)) + setgroups(list(range(400))) getgroups.assert_called_with() diff --git a/awx/lib/site-packages/celery/tests/utilities/test_saferef.py b/awx/lib/site-packages/celery/tests/utils/test_saferef.py similarity index 68% rename from awx/lib/site-packages/celery/tests/utilities/test_saferef.py rename to 
awx/lib/site-packages/celery/tests/utils/test_saferef.py index 1f5ebba550..9c18d71b16 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_saferef.py +++ b/awx/lib/site-packages/celery/tests/utils/test_saferef.py @@ -1,7 +1,8 @@ from __future__ import absolute_import +from celery.five import range from celery.utils.dispatch.saferef import safe_ref -from celery.tests.utils import Case +from celery.tests.case import Case class Class1(object): @@ -25,14 +26,14 @@ class SaferefTests(Case): def setUp(self): ts = [] ss = [] - for x in xrange(5000): + for x in range(5000): t = Class1() ts.append(t) s = safe_ref(t.x, self._closure) ss.append(s) ts.append(fun) ss.append(safe_ref(fun, self._closure)) - for x in xrange(30): + for x in range(30): t = Class2() ts.append(t) s = safe_ref(t, self._closure) @@ -45,18 +46,30 @@ class SaferefTests(Case): del self.ts del self.ss - def testIn(self): - """Test the "in" operator for safe references (cmp)""" + def test_in(self): + """test_in + + Test the "in" operator for safe references (cmp) + + """ for t in self.ts[:50]: self.assertTrue(safe_ref(t.x) in self.ss) - def testValid(self): - """Test that the references are valid (return instance methods)""" + def test_valid(self): + """test_valid + + Test that the references are valid (return instance methods) + + """ for s in self.ss: self.assertTrue(s()) - def testShortCircuit(self): - """Test that creation short-circuits to reuse existing references""" + def test_shortcircuit(self): + """test_shortcircuit + + Test that creation short-circuits to reuse existing references + + """ sd = {} for s in self.ss: sd[s] = 1 @@ -66,8 +79,10 @@ class SaferefTests(Case): else: self.assertIn(safe_ref(t), sd) - def testRepresentation(self): - """Test that the reference object's representation works + def test_representation(self): + """test_representation + + Test that the reference object's representation works XXX Doesn't currently check the results, just that no error is raised diff --git a/awx/lib/site-packages/celery/tests/utils/test_serialization.py b/awx/lib/site-packages/celery/tests/utils/test_serialization.py new file mode 100644 index 0000000000..53dfdadebd --- /dev/null +++ b/awx/lib/site-packages/celery/tests/utils/test_serialization.py @@ -0,0 +1,42 @@ +from __future__ import absolute_import + +import sys + +from celery.utils.serialization import ( + UnpickleableExceptionWrapper, + get_pickleable_etype, +) + +from celery.tests.case import Case, mask_modules + + +class test_AAPickle(Case): + + def test_no_cpickle(self): + prev = sys.modules.pop('celery.utils.serialization', None) + try: + with mask_modules('cPickle'): + from celery.utils.serialization import pickle + import pickle as orig_pickle + self.assertIs(pickle.dumps, orig_pickle.dumps) + finally: + sys.modules['celery.utils.serialization'] = prev + + +class test_UnpickleExceptionWrapper(Case): + + def test_init(self): + x = UnpickleableExceptionWrapper('foo', 'Bar', [10, lambda x: x]) + self.assertTrue(x.exc_args) + self.assertEqual(len(x.exc_args), 2) + + +class test_get_pickleable_etype(Case): + + def test_get_pickleable_etype(self): + + class Unpickleable(Exception): + def __reduce__(self): + raise ValueError('foo') + + self.assertIs(get_pickleable_etype(Unpickleable), Exception) diff --git a/awx/lib/site-packages/celery/tests/utils/test_sysinfo.py b/awx/lib/site-packages/celery/tests/utils/test_sysinfo.py new file mode 100644 index 0000000000..4cd32c7e7e --- /dev/null +++ b/awx/lib/site-packages/celery/tests/utils/test_sysinfo.py @@ -0,0
+1,33 @@ +from __future__ import absolute_import + +import os + +from celery.utils.sysinfo import load_average, df + +from celery.tests.case import Case, SkipTest, patch + + +class test_load_average(Case): + + def test_avg(self): + if not hasattr(os, 'getloadavg'): + raise SkipTest('getloadavg not available') + with patch('os.getloadavg') as getloadavg: + getloadavg.return_value = 0.54736328125, 0.6357421875, 0.69921875 + l = load_average() + self.assertTrue(l) + self.assertEqual(l, (0.55, 0.64, 0.7)) + + +class test_df(Case): + + def test_df(self): + try: + from posix import statvfs_result # noqa + except ImportError: + raise SkipTest('statvfs not available') + x = df('/') + self.assertTrue(x.total_blocks) + self.assertTrue(x.available) + self.assertTrue(x.capacity) + self.assertTrue(x.stat) diff --git a/awx/lib/site-packages/celery/tests/utilities/test_term.py b/awx/lib/site-packages/celery/tests/utils/test_term.py similarity index 65% rename from awx/lib/site-packages/celery/tests/utilities/test_term.py rename to awx/lib/site-packages/celery/tests/utils/test_term.py index 4f3e7ff22a..1bd7e4341c 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_term.py +++ b/awx/lib/site-packages/celery/tests/utils/test_term.py @@ -1,26 +1,30 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import +from __future__ import absolute_import, unicode_literals -from kombu.utils import encoding -from kombu.utils.encoding import str_t +import sys from celery.utils import term from celery.utils.term import colored, fg +from celery.five import text_t -from celery.tests.utils import Case +from celery.tests.case import Case, SkipTest class test_colored(Case): def setUp(self): - self._prev_encoding = encoding.default_encoding + if sys.platform == 'win32': + raise SkipTest('Colors not supported on Windows') + + self._prev_encoding = sys.getdefaultencoding def getdefaultencoding(): return 'utf-8' - encoding.default_encoding = getdefaultencoding + + sys.getdefaultencoding = getdefaultencoding def tearDown(self): - encoding.default_encoding = self._prev_encoding + sys.getdefaultencoding = self._prev_encoding def test_colors(self): colors = ( @@ -51,35 +55,35 @@ class test_colored(Case): self.assertTrue(str(colored().iwhite('f'))) self.assertTrue(str(colored().reset('f'))) - self.assertTrue(str_t(colored().green(u'∂bar'))) + self.assertTrue(text_t(colored().green('∂bar'))) self.assertTrue( - colored().red(u'éefoo') + colored().green(u'∂bar')) + colored().red('éefoo') + colored().green('∂bar')) self.assertEqual( colored().red('foo').no_color(), 'foo') self.assertTrue( - repr(colored().blue(u'åfoo'))) + repr(colored().blue('åfoo'))) - self.assertEqual(repr(colored()), "''") + self.assertIn("''", repr(colored())) c = colored() s = c.red('foo', c.blue('bar'), c.green('baz')) self.assertTrue(s.no_color()) - c._fold_no_color(s, u'øfoo') - c._fold_no_color(u'fooå', s) + c._fold_no_color(s, 'øfoo') + c._fold_no_color('fooå', s) - c = colored().red(u'åfoo') + c = colored().red('åfoo') self.assertEqual( - c._add(c, u'baræ'), - u'\x1b[1;31m\xe5foo\x1b[0mbar\xe6', + c._add(c, 'baræ'), + '\x1b[1;31m\xe5foo\x1b[0mbar\xe6', ) - c2 = colored().blue(u'ƒƒz') + c2 = colored().blue('ƒƒz') c3 = c._add(c, c2) self.assertEqual( c3, - u'\x1b[1;31m\xe5foo\x1b[0m\x1b[1;34m\u0192\u0192z\x1b[0m', + '\x1b[1;31m\xe5foo\x1b[0m\x1b[1;34m\u0192\u0192z\x1b[0m', ) diff --git a/awx/lib/site-packages/celery/tests/utils/test_text.py b/awx/lib/site-packages/celery/tests/utils/test_text.py new file mode 100644 index 
0000000000..383bdb6ee9 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/utils/test_text.py @@ -0,0 +1,88 @@ +from __future__ import absolute_import + +from celery.utils.text import ( + indent, + ensure_2lines, + abbr, + truncate, + abbrtask, + pretty, +) +from celery.tests.case import AppCase, Case + +RANDTEXT = """\ +The quick brown +fox jumps +over the +lazy dog\ +""" + +RANDTEXT_RES = """\ + The quick brown + fox jumps + over the + lazy dog\ +""" + +QUEUES = { + 'queue1': { + 'exchange': 'exchange1', + 'exchange_type': 'type1', + 'routing_key': 'bind1', + }, + 'queue2': { + 'exchange': 'exchange2', + 'exchange_type': 'type2', + 'routing_key': 'bind2', + }, +} + + +QUEUE_FORMAT1 = '.> queue1 exchange=exchange1(type1) key=bind1' +QUEUE_FORMAT2 = '.> queue2 exchange=exchange2(type2) key=bind2' + + +class test_Info(AppCase): + + def test_textindent(self): + self.assertEqual(indent(RANDTEXT, 4), RANDTEXT_RES) + + def test_format_queues(self): + self.app.amqp.queues = self.app.amqp.Queues(QUEUES) + self.assertEqual(sorted(self.app.amqp.queues.format().split('\n')), + sorted([QUEUE_FORMAT1, QUEUE_FORMAT2])) + + def test_ensure_2lines(self): + self.assertEqual( + len(ensure_2lines('foo\nbar\nbaz\n').splitlines()), 3, + ) + self.assertEqual( + len(ensure_2lines('foo\nbar').splitlines()), 2, + ) + + +class test_utils(Case): + + def test_truncate_text(self): + self.assertEqual(truncate('ABCDEFGHI', 3), 'ABC...') + self.assertEqual(truncate('ABCDEFGHI', 10), 'ABCDEFGHI') + + def test_abbr(self): + self.assertEqual(abbr(None, 3), '???') + self.assertEqual(abbr('ABCDEFGHI', 6), 'ABC...') + self.assertEqual(abbr('ABCDEFGHI', 20), 'ABCDEFGHI') + self.assertEqual(abbr('ABCDEFGHI', 6, None), 'ABCDEF') + + def test_abbrtask(self): + self.assertEqual(abbrtask(None, 3), '???') + self.assertEqual( + abbrtask('feeds.tasks.refresh', 10), + '[.]refresh', + ) + self.assertEqual( + abbrtask('feeds.tasks.refresh', 30), + 'feeds.tasks.refresh', + ) + + def test_pretty(self): + self.assertTrue(pretty(('a', 'b', 'c'))) diff --git a/awx/lib/site-packages/celery/tests/utils/test_threads.py b/awx/lib/site-packages/celery/tests/utils/test_threads.py new file mode 100644 index 0000000000..4c85b2338b --- /dev/null +++ b/awx/lib/site-packages/celery/tests/utils/test_threads.py @@ -0,0 +1,107 @@ +from __future__ import absolute_import + +from celery.utils.threads import ( + _LocalStack, + _FastLocalStack, + LocalManager, + Local, + bgThread, +) + +from celery.tests.case import Case, override_stdouts, patch + + +class test_bgThread(Case): + + def test_crash(self): + + class T(bgThread): + + def body(self): + raise KeyError() + + with patch('os._exit') as _exit: + with override_stdouts(): + _exit.side_effect = ValueError() + t = T() + with self.assertRaises(ValueError): + t.run() + _exit.assert_called_with(1) + + def test_interface(self): + x = bgThread() + with self.assertRaises(NotImplementedError): + x.body() + + +class test_Local(Case): + + def test_iter(self): + x = Local() + x.foo = 'bar' + ident = x.__ident_func__() + self.assertIn((ident, {'foo': 'bar'}), list(iter(x))) + + delattr(x, 'foo') + self.assertNotIn((ident, {'foo': 'bar'}), list(iter(x))) + with self.assertRaises(AttributeError): + delattr(x, 'foo') + + self.assertIsNotNone(x(lambda: 'foo')) + + +class test_LocalStack(Case): + + def test_stack(self): + x = _LocalStack() + self.assertIsNone(x.pop()) + x.__release_local__() + ident = x.__ident_func__ + x.__ident_func__ = ident + + with self.assertRaises(RuntimeError): + x()[0] + + x.push(['foo']) + 
self.assertEqual(x()[0], 'foo') + x.pop() + with self.assertRaises(RuntimeError): + x()[0] + + +class test_FastLocalStack(Case): + + def test_stack(self): + x = _FastLocalStack() + x.push(['foo']) + x.push(['bar']) + self.assertEqual(x.top, ['bar']) + self.assertEqual(len(x), 2) + x.pop() + self.assertEqual(x.top, ['foo']) + x.pop() + self.assertIsNone(x.top) + + +class test_LocalManager(Case): + + def test_init(self): + x = LocalManager() + self.assertListEqual(x.locals, []) + self.assertTrue(x.ident_func) + + ident = lambda: 1 + loc = Local() + x = LocalManager([loc], ident_func=ident) + self.assertListEqual(x.locals, [loc]) + x = LocalManager(loc, ident_func=ident) + self.assertListEqual(x.locals, [loc]) + self.assertIs(x.ident_func, ident) + self.assertIs(x.locals[0].__ident_func__, ident) + self.assertEqual(x.get_ident(), 1) + + with patch('celery.utils.threads.release_local') as release: + x.cleanup() + release.assert_called_with(loc) + + self.assertTrue(repr(x)) diff --git a/awx/lib/site-packages/celery/tests/utilities/test_timer2.py b/awx/lib/site-packages/celery/tests/utils/test_timer2.py similarity index 73% rename from awx/lib/site-packages/celery/tests/utilities/test_timer2.py rename to awx/lib/site-packages/celery/tests/utils/test_timer2.py index 6a6dd45183..cb18c21239 100644 --- a/awx/lib/site-packages/celery/tests/utilities/test_timer2.py +++ b/awx/lib/site-packages/celery/tests/utils/test_timer2.py @@ -1,15 +1,12 @@ from __future__ import absolute_import -from __future__ import with_statement import sys import time -from kombu.tests.utils import redirect_stdouts -from mock import Mock, patch - import celery.utils.timer2 as timer2 -from celery.tests.utils import Case, skip_if_quick +from celery.tests.case import Case, Mock, patch +from kombu.tests.case import redirect_stdouts class test_Entry(Case): @@ -30,6 +27,10 @@ class test_Entry(Case): tref.cancel() self.assertTrue(tref.cancelled) + def test_repr(self): + tref = timer2.Entry(lambda x: x(1, ), {}) + self.assertTrue(repr(tref)) + class test_Schedule(Case): @@ -41,39 +42,32 @@ class test_Schedule(Case): x.cancel(tref) tref.cancel.assert_called_with() + self.assertIs(x.schedule, x) + def test_handle_error(self): from datetime import datetime - to_timestamp = timer2.to_timestamp scratch = [None] - def _overflow(x): - raise OverflowError(x) - def on_error(exc_info): scratch[0] = exc_info s = timer2.Schedule(on_error=on_error) - timer2.to_timestamp = _overflow - try: - s.enter(timer2.Entry(lambda: None, (), {}), - eta=datetime.now()) - s.enter(timer2.Entry(lambda: None, (), {}), - eta=None) + with patch('kombu.async.timer.to_timestamp') as tot: + tot.side_effect = OverflowError() + s.enter_at(timer2.Entry(lambda: None, (), {}), + eta=datetime.now()) + s.enter_at(timer2.Entry(lambda: None, (), {}), eta=None) s.on_error = None with self.assertRaises(OverflowError): - s.enter(timer2.Entry(lambda: None, (), {}), - eta=datetime.now()) - finally: - timer2.to_timestamp = to_timestamp - + s.enter_at(timer2.Entry(lambda: None, (), {}), + eta=datetime.now()) exc = scratch[0] self.assertIsInstance(exc, OverflowError) class test_Timer(Case): - @skip_if_quick def test_enter_after(self): t = timer2.Timer() try: @@ -82,7 +76,7 @@ class test_Timer(Case): def set_done(): done[0] = True - t.apply_after(300, set_done) + t.call_after(0.3, set_done) mss = 0 while not done[0]: if mss >= 2.0: @@ -94,29 +88,36 @@ class test_Timer(Case): def test_exit_after(self): t = timer2.Timer() - t.apply_after = Mock() - t.exit_after(300, priority=10) - 
t.apply_after.assert_called_with(300, sys.exit, 10) + t.call_after = Mock() + t.exit_after(0.3, priority=10) + t.call_after.assert_called_with(0.3, sys.exit, 10) - def test_apply_interval(self): + def test_ensure_started_not_started(self): + t = timer2.Timer() + t.running = True + t.start = Mock() + t.ensure_started() + self.assertFalse(t.start.called) + + def test_call_repeatedly(self): t = timer2.Timer() try: t.schedule.enter_after = Mock() myfun = Mock() myfun.__name__ = 'myfun' - t.apply_interval(30, myfun) + t.call_repeatedly(0.03, myfun) self.assertEqual(t.schedule.enter_after.call_count, 1) args1, _ = t.schedule.enter_after.call_args_list[0] - msec1, tref1, _ = args1 - self.assertEqual(msec1, 30) + sec1, tref1, _ = args1 + self.assertEqual(sec1, 0.03) tref1() self.assertEqual(t.schedule.enter_after.call_count, 2) args2, _ = t.schedule.enter_after.call_args_list[1] - msec2, tref2, _ = args2 - self.assertEqual(msec2, 30) + sec2, tref2, _ = args2 + self.assertEqual(sec2, 0.03) tref2.cancelled = True tref2() @@ -124,7 +125,7 @@ class test_Timer(Case): finally: t.stop() - @patch('celery.utils.timer2.logger') + @patch('kombu.async.timer.logger') def test_apply_entry_error_handled(self, logger): t = timer2.Timer() t.schedule.on_error = None @@ -171,7 +172,7 @@ class test_Timer(Case): t._do_enter = Mock() e = Mock() t.enter(e, 13, 0) - t._do_enter.assert_called_with('enter', e, 13, priority=0) + t._do_enter.assert_called_with('enter_at', e, 13, priority=0) def test_test_enter_after(self): t = timer2.Timer() diff --git a/awx/lib/site-packages/celery/tests/utils/test_timeutils.py b/awx/lib/site-packages/celery/tests/utils/test_timeutils.py new file mode 100644 index 0000000000..5849597cb4 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/utils/test_timeutils.py @@ -0,0 +1,264 @@ +from __future__ import absolute_import + +import pytz + +from datetime import datetime, timedelta, tzinfo +from pytz import AmbiguousTimeError + +from celery.utils.timeutils import ( + delta_resolution, + humanize_seconds, + maybe_iso8601, + maybe_timedelta, + timedelta_seconds, + timezone, + rate, + remaining, + make_aware, + maybe_make_aware, + localize, + LocalTimezone, + ffwd, + utcoffset, +) +from celery.utils.iso8601 import parse_iso8601 +from celery.tests.case import Case, Mock, patch + + +class test_LocalTimezone(Case): + + def test_daylight(self): + with patch('celery.utils.timeutils._time') as time: + time.timezone = 3600 + time.daylight = False + x = LocalTimezone() + self.assertEqual(x.STDOFFSET, timedelta(seconds=-3600)) + self.assertEqual(x.DSTOFFSET, x.STDOFFSET) + time.daylight = True + time.altzone = 3600 + y = LocalTimezone() + self.assertEqual(y.STDOFFSET, timedelta(seconds=-3600)) + self.assertEqual(y.DSTOFFSET, timedelta(seconds=-3600)) + + self.assertTrue(repr(y)) + + y._isdst = Mock() + y._isdst.return_value = True + self.assertTrue(y.utcoffset(datetime.now())) + self.assertFalse(y.dst(datetime.now())) + y._isdst.return_value = False + self.assertTrue(y.utcoffset(datetime.now())) + self.assertFalse(y.dst(datetime.now())) + + self.assertTrue(y.tzname(datetime.now())) + + +class test_iso8601(Case): + + def test_parse_with_timezone(self): + d = datetime.utcnow().replace(tzinfo=pytz.utc) + self.assertEqual(parse_iso8601(d.isoformat()), d) + # 2013-06-07T20:12:51.775877+00:00 + iso = d.isoformat() + iso1 = iso.replace('+00:00', '-01:00') + d1 = parse_iso8601(iso1) + self.assertEqual(d1.tzinfo._minutes, -60) + iso2 = iso.replace('+00:00', '+01:00') + d2 = parse_iso8601(iso2) + 
self.assertEqual(d2.tzinfo._minutes, +60) + + +class test_timeutils(Case): + + def test_delta_resolution(self): + D = delta_resolution + dt = datetime(2010, 3, 30, 11, 50, 58, 41065) + deltamap = ((timedelta(days=2), datetime(2010, 3, 30, 0, 0)), + (timedelta(hours=2), datetime(2010, 3, 30, 11, 0)), + (timedelta(minutes=2), datetime(2010, 3, 30, 11, 50)), + (timedelta(seconds=2), dt)) + for delta, shoulda in deltamap: + self.assertEqual(D(dt, delta), shoulda) + + def test_timedelta_seconds(self): + deltamap = ((timedelta(seconds=1), 1), + (timedelta(seconds=27), 27), + (timedelta(minutes=3), 3 * 60), + (timedelta(hours=4), 4 * 60 * 60), + (timedelta(days=3), 3 * 86400)) + for delta, seconds in deltamap: + self.assertEqual(timedelta_seconds(delta), seconds) + + def test_timedelta_seconds_returns_0_on_negative_time(self): + delta = timedelta(days=-2) + self.assertEqual(timedelta_seconds(delta), 0) + + def test_humanize_seconds(self): + t = ((4 * 60 * 60 * 24, '4.00 days'), + (1 * 60 * 60 * 24, '1.00 day'), + (4 * 60 * 60, '4.00 hours'), + (1 * 60 * 60, '1.00 hour'), + (4 * 60, '4.00 minutes'), + (1 * 60, '1.00 minute'), + (4, '4.00 seconds'), + (1, '1.00 second'), + (4.3567631221, '4.36 seconds'), + (0, 'now')) + + for seconds, human in t: + self.assertEqual(humanize_seconds(seconds), human) + + self.assertEqual(humanize_seconds(4, prefix='about '), + 'about 4.00 seconds') + + def test_maybe_iso8601_datetime(self): + now = datetime.now() + self.assertIs(maybe_iso8601(now), now) + + def test_maybe_timedelta(self): + D = maybe_timedelta + + for i in (30, 30.6): + self.assertEqual(D(i), timedelta(seconds=i)) + + self.assertEqual(D(timedelta(days=2)), timedelta(days=2)) + + def test_remaining_relative(self): + remaining(datetime.utcnow(), timedelta(hours=1), relative=True) + + +class test_timezone(Case): + + def test_get_timezone_with_pytz(self): + self.assertTrue(timezone.get_timezone('UTC')) + + def test_tz_or_local(self): + self.assertEqual(timezone.tz_or_local(), timezone.local) + self.assertTrue(timezone.tz_or_local(timezone.utc)) + + def test_to_local(self): + self.assertTrue( + timezone.to_local(make_aware(datetime.utcnow(), timezone.utc)), + ) + self.assertTrue( + timezone.to_local(datetime.utcnow()) + ) + + def test_to_local_fallback(self): + self.assertTrue( + timezone.to_local_fallback( + make_aware(datetime.utcnow(), timezone.utc)), + ) + self.assertTrue( + timezone.to_local_fallback(datetime.utcnow()) + ) + + +class test_make_aware(Case): + + def test_tz_without_localize(self): + tz = tzinfo() + self.assertFalse(hasattr(tz, 'localize')) + wtz = make_aware(datetime.utcnow(), tz) + self.assertEqual(wtz.tzinfo, tz) + + def test_when_has_localize(self): + + class tzz(tzinfo): + raises = False + + def localize(self, dt, is_dst=None): + self.localized = True + if self.raises and is_dst is None: + self.raised = True + raise AmbiguousTimeError() + return 1 # needed by min() in Python 3 (None not hashable) + + tz = tzz() + make_aware(datetime.utcnow(), tz) + self.assertTrue(tz.localized) + + tz2 = tzz() + tz2.raises = True + make_aware(datetime.utcnow(), tz2) + self.assertTrue(tz2.localized) + self.assertTrue(tz2.raised) + + def test_maybe_make_aware(self): + aware = datetime.utcnow().replace(tzinfo=timezone.utc) + self.assertTrue(maybe_make_aware(aware), timezone.utc) + naive = datetime.utcnow() + self.assertTrue(maybe_make_aware(naive)) + + +class test_localize(Case): + + def test_tz_without_normalize(self): + tz = tzinfo() + self.assertFalse(hasattr(tz, 'normalize')) + 
self.assertTrue(localize(make_aware(datetime.utcnow(), tz), tz))
+
+    def test_when_has_normalize(self):
+
+        class tzz(tzinfo):
+            raises = None
+
+            def normalize(self, dt, **kwargs):
+                self.normalized = True
+                if self.raises and kwargs and kwargs.get('is_dst') is None:
+                    self.raised = True
+                    raise self.raises
+                return 1  # needed by min() in Python 3 (None not hashable)
+
+        tz = tzz()
+        localize(make_aware(datetime.utcnow(), tz), tz)
+        self.assertTrue(tz.normalized)
+
+        tz2 = tzz()
+        tz2.raises = AmbiguousTimeError()
+        localize(make_aware(datetime.utcnow(), tz2), tz2)
+        self.assertTrue(tz2.normalized)
+        self.assertTrue(tz2.raised)
+
+        tz3 = tzz()
+        tz3.raises = TypeError()
+        localize(make_aware(datetime.utcnow(), tz3), tz3)
+        self.assertTrue(tz3.normalized)
+        self.assertTrue(tz3.raised)
+
+
+class test_rate_limit_string(Case):
+
+    def test_conversion(self):
+        self.assertEqual(rate(999), 999)
+        self.assertEqual(rate(7.5), 7.5)
+        self.assertEqual(rate('2.5/s'), 2.5)
+        self.assertEqual(rate('1456/s'), 1456)
+        self.assertEqual(rate('100/m'),
+                         100 / 60.0)
+        self.assertEqual(rate('10/h'),
+                         10 / 60.0 / 60.0)
+
+        for zero in (0, None, '0', '0/m', '0/h', '0/s', '0.0/s'):
+            self.assertEqual(rate(zero), 0)
+
+
+class test_ffwd(Case):
+
+    def test_repr(self):
+        x = ffwd(year=2012)
+        self.assertTrue(repr(x))
+
+    def test_radd_with_unknown_gives_NotImplemented(self):
+        x = ffwd(year=2012)
+        self.assertEqual(x.__radd__(object()), NotImplemented)
+
+
+class test_utcoffset(Case):
+
+    def test_utcoffset(self):
+        with patch('celery.utils.timeutils._time') as _time:
+            _time.daylight = True
+            self.assertIsNotNone(utcoffset())
+            _time.daylight = False
+            self.assertIsNotNone(utcoffset())
diff --git a/awx/lib/site-packages/celery/tests/utils/test_utils.py b/awx/lib/site-packages/celery/tests/utils/test_utils.py
new file mode 100644
index 0000000000..2837ad6369
--- /dev/null
+++ b/awx/lib/site-packages/celery/tests/utils/test_utils.py
@@ -0,0 +1,108 @@
+from __future__ import absolute_import
+
+import pytz
+
+from datetime import datetime, date, time, timedelta
+
+from kombu import Queue
+
+from celery.utils import (
+    chunks,
+    is_iterable,
+    cached_property,
+    warn_deprecated,
+    worker_direct,
+    gen_task_name,
+    jsonify,
+)
+from celery.tests.case import Case, Mock, patch
+
+
+def double(x):
+    return x * 2
+
+
+class test_worker_direct(Case):
+
+    def test_returns_if_queue(self):
+        q = Queue('foo')
+        self.assertIs(worker_direct(q), q)
+
+
+class test_gen_task_name(Case):
+
+    def test_no_module(self):
+        app = Mock()
+        app.name = '__main__'
+        self.assertTrue(gen_task_name(app, 'foo', 'axsadaewe'))
+
+
+class test_jsonify(Case):
+
+    def test_simple(self):
+        self.assertTrue(jsonify(Queue('foo')))
+        self.assertTrue(jsonify(['foo', 'bar', 'baz']))
+        self.assertTrue(jsonify({'foo': 'bar'}))
+        self.assertTrue(jsonify(datetime.utcnow()))
+        self.assertTrue(jsonify(datetime.utcnow().replace(tzinfo=pytz.utc)))
+        self.assertTrue(jsonify(datetime.utcnow().replace(microsecond=0)))
+        self.assertTrue(jsonify(date(2012, 1, 1)))
+        self.assertTrue(jsonify(time(hour=1, minute=30)))
+        self.assertTrue(jsonify(time(hour=1, minute=30, microsecond=3)))
+        self.assertTrue(jsonify(timedelta(seconds=30)))
+        self.assertTrue(jsonify(10))
+        self.assertTrue(jsonify(10.3))
+        self.assertTrue(jsonify('hello'))
+
+        with self.assertRaises(ValueError):
+            jsonify(object())
+
+
+class test_chunks(Case):
+
+    def test_chunks(self):
+
+        # n == 2
+        x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2)
+        self.assertListEqual(
+            list(x),
+            [[0, 1], [2, 3], 
[4, 5], [6, 7], [8, 9], [10]], + ) + + # n == 3 + x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) + self.assertListEqual( + list(x), + [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]], + ) + + # n == 2 (exact) + x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 2) + self.assertListEqual( + list(x), + [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], + ) + + +class test_utils(Case): + + def test_is_iterable(self): + for a in 'f', ['f'], ('f', ), {'f': 'f'}: + self.assertTrue(is_iterable(a)) + for b in object(), 1: + self.assertFalse(is_iterable(b)) + + def test_cached_property(self): + + def fun(obj): + return fun.value + + x = cached_property(fun) + self.assertIs(x.__get__(None), x) + self.assertIs(x.__set__(None, None), x) + self.assertIs(x.__delete__(None), x) + + @patch('warnings.warn') + def test_warn_deprecated(self, warn): + warn_deprecated('Foo') + self.assertTrue(warn.called) diff --git a/awx/lib/site-packages/celery/tests/worker/test_autoreload.py b/awx/lib/site-packages/celery/tests/worker/test_autoreload.py index 38d8c9ed80..aa550d930f 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_autoreload.py +++ b/awx/lib/site-packages/celery/tests/worker/test_autoreload.py @@ -1,11 +1,9 @@ from __future__ import absolute_import -from __future__ import with_statement import errno import select import sys -from mock import Mock, patch from time import time from celery.worker import autoreload @@ -20,7 +18,7 @@ from celery.worker.autoreload import ( Autoreloader, ) -from celery.tests.utils import AppCase, Case, mock_open +from celery.tests.case import AppCase, Case, Mock, patch, mock_open class test_WorkerComponent(AppCase): @@ -43,9 +41,11 @@ class test_WorkerComponent(AppCase): x.instantiate = Mock() r = x.create(w) x.instantiate.assert_called_with(w.autoreloader_cls, w) + x.register_with_event_loop(w, w.hub) self.assertIsNone(r) - w.hub.on_init.append.assert_called_with(w.autoreloader.on_poll_init) - w.hub.on_close.append.assert_called_with(w.autoreloader.on_poll_close) + w.hub.on_close.add.assert_called_with( + w.autoreloader.on_event_loop_close, + ) class test_file_hash(Case): @@ -98,8 +98,14 @@ class test_StatMonitor(Case): stat.side_effect = OSError() x.start() + @patch('os.stat') + def test_mtime_stat_raises(self, stat): + stat.side_effect = ValueError() + x = StatMonitor(['a', 'b']) + x._mtime('a') -class test_KQueueMontior(Case): + +class test_KQueueMonitor(Case): @patch('select.kqueue', create=True) @patch('os.close') @@ -115,6 +121,35 @@ class test_KQueueMontior(Case): close.side_effect.errno = errno.EBADF x.stop() + def test_register_with_event_loop(self): + x = KQueueMonitor(['a', 'b']) + hub = Mock() + x.add_events = Mock() + x.register_with_event_loop(hub) + x.add_events.assert_called_with(hub.poller) + self.assertEqual( + hub.poller.on_file_change, + x.handle_event, + ) + + def test_on_event_loop_close(self): + x = KQueueMonitor(['a', 'b']) + x.close = Mock() + hub = Mock() + x.on_event_loop_close(hub) + x.close.assert_called_with(hub.poller) + + def test_handle_event(self): + x = KQueueMonitor(['a', 'b']) + x.on_change = Mock() + eA = Mock() + eA.ident = 'a' + eB = Mock() + eB.ident = 'b' + x.fdmap = {'a': 'A', 'b': 'B'} + x.handle_event([eA, eB]) + x.on_change.assert_called_with(['A', 'B']) + @patch('kombu.utils.eventio.kqueue', create=True) @patch('kombu.utils.eventio.kevent', create=True) @patch('os.open') @@ -208,6 +243,33 @@ class test_default_implementation(Case): class test_Autoreloader(AppCase): + def test_register_with_event_loop(self): + x = 
Autoreloader(Mock(), modules=[__name__]) + hub = Mock() + x._monitor = None + x.on_init = Mock() + + def se(*args, **kwargs): + x._monitor = Mock() + x.on_init.side_effect = se + + x.register_with_event_loop(hub) + x.on_init.assert_called_with() + x._monitor.register_with_event_loop.assert_called_with(hub) + + x._monitor.register_with_event_loop.reset_mock() + x.register_with_event_loop(hub) + x._monitor.register_with_event_loop.assert_called_with(hub) + + def test_on_event_loop_close(self): + x = Autoreloader(Mock(), modules=[__name__]) + hub = Mock() + x._monitor = Mock() + x.on_event_loop_close(hub) + x._monitor.on_event_loop_close.assert_called_with(hub) + x._monitor = None + x.on_event_loop_close(hub) + @patch('celery.worker.autoreload.file_hash') def test_start(self, fhash): x = Autoreloader(Mock(), modules=[__name__]) @@ -233,6 +295,8 @@ class test_Autoreloader(AppCase): self.assertTrue(x._maybe_modified(__name__)) x._hashes[__name__] = 'abcd' self.assertFalse(x._maybe_modified(__name__)) + exists.return_value = False + self.assertFalse(x._maybe_modified(__name__)) def test_on_change(self): x = Autoreloader(Mock(), modules=[__name__]) diff --git a/awx/lib/site-packages/celery/tests/worker/test_autoscale.py b/awx/lib/site-packages/celery/tests/worker/test_autoscale.py index f82aa7c326..45ea488cc0 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_autoscale.py +++ b/awx/lib/site-packages/celery/tests/worker/test_autoscale.py @@ -2,14 +2,11 @@ from __future__ import absolute_import import sys -from time import time - -from mock import Mock, patch - from celery.concurrency.base import BasePool +from celery.five import monotonic from celery.worker import state from celery.worker import autoscale -from celery.tests.utils import Case, sleepdeprived +from celery.tests.case import AppCase, Mock, patch, sleepdeprived class Object(object): @@ -40,9 +37,37 @@ class MockPool(BasePool): return self._pool._processes -class test_Autoscaler(Case): +class test_WorkerComponent(AppCase): - def setUp(self): + def test_register_with_event_loop(self): + parent = Mock(name='parent') + parent.autoscale = True + parent.consumer.on_task_message = set() + w = autoscale.WorkerComponent(parent) + self.assertIsNone(parent.autoscaler) + self.assertTrue(w.enabled) + + hub = Mock(name='hub') + w.create(parent) + w.register_with_event_loop(parent, hub) + self.assertIn( + parent.autoscaler.maybe_scale, + parent.consumer.on_task_message, + ) + hub.call_repeatedly.assert_called_with( + parent.autoscaler.keepalive, parent.autoscaler.maybe_scale, + ) + + parent.hub = hub + hub.on_init = [] + w.instantiate = Mock() + w.register_with_event_loop(parent, Mock(name='loop')) + self.assertTrue(parent.consumer.on_task_message) + + +class test_Autoscaler(AppCase): + + def setup(self): self.pool = MockPool(3) def test_stop(self): @@ -57,7 +82,8 @@ class test_Autoscaler(Case): def join(self, timeout=None): self.joined = True - x = Scaler(self.pool, 10, 3) + worker = Mock(name='worker') + x = Scaler(self.pool, 10, 3, worker=worker) x._is_stopped.set() x.stop() self.assertTrue(x.joined) @@ -68,7 +94,8 @@ class test_Autoscaler(Case): @sleepdeprived(autoscale) def test_body(self): - x = autoscale.Autoscaler(self.pool, 10, 3) + worker = Mock(name='worker') + x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.body() self.assertEqual(x.pool.num_processes, 3) for i in range(20): @@ -76,12 +103,14 @@ class test_Autoscaler(Case): x.body() x.body() self.assertEqual(x.pool.num_processes, 10) + 
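
The autoscaler tests here drive the scale-down path by rewinding `x._last_action` with `monotonic() - 10000`. A minimal sketch of the keepalive pattern being exercised, using illustrative names (`ToyAutoscaler`, `maybe_scale`) and thresholds that are assumptions for illustration, not the vendored implementation:

    from celery.five import monotonic  # same import the hunk above adds

    class ToyAutoscaler(object):
        """Illustrative sketch: scale a pool between min and max procs."""

        def __init__(self, min_procs=3, max_procs=10, keepalive=30.0):
            self.min_procs = min_procs
            self.max_procs = max_procs
            self.keepalive = keepalive
            self.procs = min_procs
            self._last_action = None

        def maybe_scale(self, reserved):
            # Grow immediately when demand exceeds the current pool size.
            if reserved > self.procs and self.procs < self.max_procs:
                self.procs += 1
                self._last_action = monotonic()
            # Shrink only after `keepalive` seconds of headroom, which is
            # why the tests rewind `_last_action` far into the past.
            elif self.procs > self.min_procs and (
                    self._last_action is None or
                    monotonic() - self._last_action > self.keepalive):
                self.procs -= 1
                self._last_action = monotonic()

Using a monotonic clock rather than `time()` keeps the keepalive window immune to wall-clock adjustments, which is consistent with the hunks above replacing `time()` with `celery.five.monotonic`.
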
self.assertTrue(worker.consumer._update_prefetch_count.called) state.reserved_requests.clear() x.body() self.assertEqual(x.pool.num_processes, 10) - x._last_action = time() - 10000 + x._last_action = monotonic() - 10000 x.body() self.assertEqual(x.pool.num_processes, 3) + self.assertTrue(worker.consumer._update_prefetch_count.called) def test_run(self): @@ -92,30 +121,34 @@ class test_Autoscaler(Case): self.scale_called = True self._is_shutdown.set() - x = Scaler(self.pool, 10, 3) + worker = Mock(name='worker') + x = Scaler(self.pool, 10, 3, worker=worker) x.run() self.assertTrue(x._is_shutdown.isSet()) self.assertTrue(x._is_stopped.isSet()) self.assertTrue(x.scale_called) def test_shrink_raises_exception(self): - x = autoscale.Autoscaler(self.pool, 10, 3) + worker = Mock(name='worker') + x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.scale_up(3) - x._last_action = time() - 10000 + x._last_action = monotonic() - 10000 x.pool.shrink_raises_exception = True x.scale_down(1) @patch('celery.worker.autoscale.debug') def test_shrink_raises_ValueError(self, debug): - x = autoscale.Autoscaler(self.pool, 10, 3) + worker = Mock(name='worker') + x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) x.scale_up(3) - x._last_action = time() - 10000 + x._last_action = monotonic() - 10000 x.pool.shrink_raises_ValueError = True x.scale_down(1) self.assertTrue(debug.call_count) def test_update_and_force(self): - x = autoscale.Autoscaler(self.pool, 10, 3) + worker = Mock(name='worker') + x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) self.assertEqual(x.processes, 3) x.force_scale_up(5) self.assertEqual(x.processes, 8) @@ -137,7 +170,8 @@ class test_Autoscaler(Case): x.update(max=None, min=None) def test_info(self): - x = autoscale.Autoscaler(self.pool, 10, 3) + worker = Mock(name='worker') + x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) info = x.info() self.assertEqual(info['max'], 10) self.assertEqual(info['min'], 3) @@ -151,7 +185,8 @@ class test_Autoscaler(Case): def body(self): self._is_shutdown.set() raise OSError('foo') - x = _Autoscaler(self.pool, 10, 3) + worker = Mock(name='worker') + x = _Autoscaler(self.pool, 10, 3, worker=worker) stderr = Mock() p, sys.stderr = sys.stderr, stderr diff --git a/awx/lib/site-packages/celery/tests/worker/test_bootsteps.py b/awx/lib/site-packages/celery/tests/worker/test_bootsteps.py index 6b50204e83..522d263b3d 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_bootsteps.py +++ b/awx/lib/site-packages/celery/tests/worker/test_bootsteps.py @@ -1,39 +1,74 @@ from __future__ import absolute_import -from __future__ import with_statement -from mock import Mock +from celery import bootsteps -from celery.worker import bootsteps - -from celery.tests.utils import AppCase, Case +from celery.tests.case import AppCase, Mock, patch -class test_Component(Case): +class test_StepFormatter(AppCase): - class Def(bootsteps.Component): - name = 'test_Component.Def' + def test_get_prefix(self): + f = bootsteps.StepFormatter() + s = Mock() + s.last = True + self.assertEqual(f._get_prefix(s), f.blueprint_prefix) - def test_components_must_be_named(self): - with self.assertRaises(NotImplementedError): + s2 = Mock() + s2.last = False + s2.conditional = True + self.assertEqual(f._get_prefix(s2), f.conditional_prefix) - class X(bootsteps.Component): - pass + s3 = Mock() + s3.last = s3.conditional = False + self.assertEqual(f._get_prefix(s3), '') - class Y(bootsteps.Component): - abstract = True + def test_node(self): + f = 
bootsteps.StepFormatter() + f.draw_node = Mock() + step = Mock() + step.last = False + f.node(step, x=3) + f.draw_node.assert_called_with(step, f.node_scheme, {'x': 3}) - def test_namespace_name(self, ns='test_namespace_name'): + step.last = True + f.node(step, x=3) + f.draw_node.assert_called_with(step, f.blueprint_scheme, {'x': 3}) - class X(bootsteps.Component): - namespace = ns + def test_edge(self): + f = bootsteps.StepFormatter() + f.draw_edge = Mock() + a, b = Mock(), Mock() + a.last = True + f.edge(a, b, x=6) + f.draw_edge.assert_called_with(a, b, f.edge_scheme, { + 'x': 6, 'arrowhead': 'none', 'color': 'darkseagreen3', + }) + + a.last = False + f.edge(a, b, x=6) + f.draw_edge.assert_called_with(a, b, f.edge_scheme, { + 'x': 6, + }) + + +class test_Step(AppCase): + + class Def(bootsteps.StartStopStep): + name = 'test_Step.Def' + + def setup(self): + self.steps = [] + + def test_blueprint_name(self, bp='test_blueprint_name'): + + class X(bootsteps.Step): + blueprint = bp name = 'X' - self.assertEqual(X.namespace, ns) self.assertEqual(X.name, 'X') - class Y(bootsteps.Component): - name = '%s.Y' % (ns, ) - self.assertEqual(Y.namespace, ns) - self.assertEqual(Y.name, 'Y') + class Y(bootsteps.Step): + name = '%s.Y' % bp + self.assertEqual(Y.name, '%s.Y' % bp) def test_init(self): self.assertTrue(self.Def(self)) @@ -70,14 +105,57 @@ class test_Component(Case): self.assertFalse(x.include(self)) self.assertFalse(x.create.call_count) + def test_repr(self): + x = self.Def(self) + self.assertTrue(repr(x)) -class test_StartStopComponent(Case): - class Def(bootsteps.StartStopComponent): - name = 'test_StartStopComponent.Def' +class test_ConsumerStep(AppCase): - def setUp(self): - self.components = [] + def test_interface(self): + step = bootsteps.ConsumerStep(self) + with self.assertRaises(NotImplementedError): + step.get_consumers(self) + + def test_start_stop_shutdown(self): + consumer = Mock() + self.connection = Mock() + + class Step(bootsteps.ConsumerStep): + + def get_consumers(self, c): + return [consumer] + + step = Step(self) + self.assertEqual(step.get_consumers(self), [consumer]) + + step.start(self) + consumer.consume.assert_called_with() + step.stop(self) + consumer.cancel.assert_called_with() + + step.shutdown(self) + consumer.channel.close.assert_called_with() + + def test_start_no_consumers(self): + self.connection = Mock() + + class Step(bootsteps.ConsumerStep): + + def get_consumers(self, c): + return () + + step = Step(self) + step.start(self) + + +class test_StartStopStep(AppCase): + + class Def(bootsteps.StartStopStep): + name = 'test_StartStopStep.Def' + + def setup(self): + self.steps = [] def test_start__stop(self): x = self.Def(self) @@ -85,141 +163,176 @@ class test_StartStopComponent(Case): # include creates the underlying object and sets # its x.obj attribute to it, as well as appending - # it to the parent.components list. + # it to the parent.steps list. 
x.include(self) - self.assertTrue(self.components) - self.assertIs(self.components[0], x.obj) + self.assertTrue(self.steps) + self.assertIs(self.steps[0], x) - x.start() + x.start(self) x.obj.start.assert_called_with() - x.stop() + x.stop(self) x.obj.stop.assert_called_with() + x.obj = None + self.assertIsNone(x.start(self)) + def test_include_when_disabled(self): x = self.Def(self) x.enabled = False x.include(self) - self.assertFalse(self.components) + self.assertFalse(self.steps) - def test_terminate_when_terminable(self): + def test_terminate(self): x = self.Def(self) - x.terminable = True x.create = Mock() x.include(self) - x.terminate() - x.obj.terminate.assert_called_with() - self.assertFalse(x.obj.stop.call_count) - - def test_terminate_calls_stop_when_not_terminable(self): - x = self.Def(self) - x.terminable = False - x.create = Mock() - - x.include(self) - x.terminate() + delattr(x.obj, 'terminate') + x.terminate(self) x.obj.stop.assert_called_with() - self.assertFalse(x.obj.terminate.call_count) -class test_Namespace(AppCase): +class test_Blueprint(AppCase): - class NS(bootsteps.Namespace): - name = 'test_Namespace' + class Blueprint(bootsteps.Blueprint): + name = 'test_Blueprint' - class ImportingNS(bootsteps.Namespace): + def test_steps_added_to_unclaimed(self): - def __init__(self, *args, **kwargs): - bootsteps.Namespace.__init__(self, *args, **kwargs) - self.imported = [] + class tnA(bootsteps.Step): + name = 'test_Blueprint.A' - def modules(self): - return ['A', 'B', 'C'] + class tnB(bootsteps.Step): + name = 'test_Blueprint.B' - def import_module(self, module): - self.imported.append(module) - - def test_components_added_to_unclaimed(self): - - class tnA(bootsteps.Component): - name = 'test_Namespace.A' - - class tnB(bootsteps.Component): - name = 'test_Namespace.B' - - class xxA(bootsteps.Component): + class xxA(bootsteps.Step): name = 'xx.A' - self.assertIn('A', self.NS._unclaimed['test_Namespace']) - self.assertIn('B', self.NS._unclaimed['test_Namespace']) - self.assertIn('A', self.NS._unclaimed['xx']) - self.assertNotIn('B', self.NS._unclaimed['xx']) + class Blueprint(self.Blueprint): + default_steps = [tnA, tnB] + blueprint = Blueprint(app=self.app) + + self.assertIn(tnA, blueprint._all_steps()) + self.assertIn(tnB, blueprint._all_steps()) + self.assertNotIn(xxA, blueprint._all_steps()) def test_init(self): - ns = self.NS(app=self.app) - self.assertIs(ns.app, self.app) - self.assertEqual(ns.name, 'test_Namespace') - self.assertFalse(ns.services) + blueprint = self.Blueprint(app=self.app) + self.assertIs(blueprint.app, self.app) + self.assertEqual(blueprint.name, 'test_Blueprint') - def test_interface_modules(self): - self.NS(app=self.app).modules() + def test_close__on_close_is_None(self): + blueprint = self.Blueprint(app=self.app) + blueprint.on_close = None + blueprint.send_all = Mock() + blueprint.close(1) + blueprint.send_all.assert_called_with( + 1, 'close', 'closing', reverse=False, + ) - def test_load_modules(self): - x = self.ImportingNS(app=self.app) - x.load_modules() - self.assertListEqual(x.imported, ['A', 'B', 'C']) + def test_send_all_with_None_steps(self): + parent = Mock() + blueprint = self.Blueprint(app=self.app) + parent.steps = [None, None, None] + blueprint.send_all(parent, 'close', 'Closing', reverse=False) + + def test_join_raises_IGNORE_ERRORS(self): + prev, bootsteps.IGNORE_ERRORS = bootsteps.IGNORE_ERRORS, (KeyError, ) + try: + blueprint = self.Blueprint(app=self.app) + blueprint.shutdown_complete = Mock() + 
blueprint.shutdown_complete.wait.side_effect = KeyError('luke') + blueprint.join(timeout=10) + blueprint.shutdown_complete.wait.assert_called_with(timeout=10) + finally: + bootsteps.IGNORE_ERRORS = prev + + def test_connect_with(self): + + class b1s1(bootsteps.Step): + pass + + class b1s2(bootsteps.Step): + last = True + + class b2s1(bootsteps.Step): + pass + + class b2s2(bootsteps.Step): + last = True + + b1 = self.Blueprint([b1s1, b1s2], app=self.app) + b2 = self.Blueprint([b2s1, b2s2], app=self.app) + b1.apply(Mock()) + b2.apply(Mock()) + b1.connect_with(b2) + + self.assertIn(b1s1, b1.graph) + self.assertIn(b2s1, b1.graph) + self.assertIn(b2s2, b1.graph) + + self.assertTrue(repr(b1s1)) + self.assertTrue(str(b1s1)) + + def test_topsort_raises_KeyError(self): + + class Step(bootsteps.Step): + requires = ('xyxxx.fsdasewe.Unknown', ) + + b = self.Blueprint([Step], app=self.app) + b.steps = b.claim_steps() + with self.assertRaises(ImportError): + b._finalize_steps(b.steps) + Step.requires = () + + b.steps = b.claim_steps() + b._finalize_steps(b.steps) + + with patch('celery.bootsteps.DependencyGraph') as Dep: + g = Dep.return_value = Mock() + g.topsort.side_effect = KeyError('foo') + with self.assertRaises(KeyError): + b._finalize_steps(b.steps) def test_apply(self): - class MyNS(bootsteps.Namespace): + class MyBlueprint(bootsteps.Blueprint): name = 'test_apply' def modules(self): return ['A', 'B'] - class A(bootsteps.Component): - name = 'test_apply.A' - requires = ['C'] - - class B(bootsteps.Component): + class B(bootsteps.Step): name = 'test_apply.B' - class C(bootsteps.Component): + class C(bootsteps.Step): name = 'test_apply.C' - requires = ['B'] + requires = [B] - class D(bootsteps.Component): + class A(bootsteps.Step): + name = 'test_apply.A' + requires = [C] + + class D(bootsteps.Step): name = 'test_apply.D' last = True - x = MyNS(app=self.app) - x.import_module = Mock() + x = MyBlueprint([A, D], app=self.app) x.apply(self) - self.assertItemsEqual(x.components.values(), [A, B, C, D]) - self.assertTrue(x.import_module.call_count) + self.assertIsInstance(x.order[0], B) + self.assertIsInstance(x.order[1], C) + self.assertIsInstance(x.order[2], A) + self.assertIsInstance(x.order[3], D) + self.assertIn(A, x.types) + self.assertIs(x[A.name], x.order[2]) - for boot_step in x.boot_steps: - self.assertEqual(boot_step.namespace, x) + def test_find_last_but_no_steps(self): - self.assertIsInstance(x.boot_steps[0], B) - self.assertIsInstance(x.boot_steps[1], C) - self.assertIsInstance(x.boot_steps[2], A) - self.assertIsInstance(x.boot_steps[3], D) - - self.assertIs(x['A'], A) - - def test_import_module(self): - x = self.NS(app=self.app) - import os - self.assertIs(x.import_module('os'), os) - - def test_find_last_but_no_components(self): - - class MyNS(bootsteps.Namespace): + class MyBlueprint(bootsteps.Blueprint): name = 'qwejwioqjewoqiej' - x = MyNS(app=self.app) + x = MyBlueprint(app=self.app) x.apply(self) self.assertIsNone(x._find_last()) diff --git a/awx/lib/site-packages/celery/tests/worker/test_components.py b/awx/lib/site-packages/celery/tests/worker/test_components.py new file mode 100644 index 0000000000..b39865db40 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/worker/test_components.py @@ -0,0 +1,38 @@ +from __future__ import absolute_import + +# some of these are tested in test_worker, so I've only written tests +# here to complete coverage. 
Should move everything to this module at some
+# point [-ask]
+
+from celery.worker.components import (
+    Queues,
+    Pool,
+)
+
+from celery.tests.case import AppCase, Mock
+
+
+class test_Queues(AppCase):
+
+    def test_create_when_eventloop(self):
+        w = Mock()
+        w.use_eventloop = w.pool_putlocks = w.pool_cls.uses_semaphore = True
+        q = Queues(w)
+        q.create(w)
+        self.assertIs(w.process_task, w._process_task_sem)
+
+
+class test_Pool(AppCase):
+
+    def test_close_terminate(self):
+        w = Mock()
+        comp = Pool(w)
+        pool = w.pool = Mock()
+        comp.close(w)
+        pool.close.assert_called_with()
+        comp.terminate(w)
+        pool.terminate.assert_called_with()
+
+        w.pool = None
+        comp.close(w)
+        comp.terminate(w)
diff --git a/awx/lib/site-packages/celery/tests/worker/test_consumer.py b/awx/lib/site-packages/celery/tests/worker/test_consumer.py
new file mode 100644
index 0000000000..f287ce417e
--- /dev/null
+++ b/awx/lib/site-packages/celery/tests/worker/test_consumer.py
@@ -0,0 +1,454 @@
+from __future__ import absolute_import
+
+import errno
+import socket
+
+from billiard.exceptions import RestartFreqExceeded
+
+from celery.datastructures import LimitedSet
+from celery.worker import state as worker_state
+from celery.worker.consumer import (
+    Consumer,
+    Heart,
+    Tasks,
+    Agent,
+    Mingle,
+    Gossip,
+    dump_body,
+    CLOSE,
+)
+
+from celery.tests.case import AppCase, Mock, SkipTest, call, patch
+
+
+class test_Consumer(AppCase):
+
+    def get_consumer(self, no_hub=False, **kwargs):
+        consumer = Consumer(
+            on_task_request=Mock(),
+            init_callback=Mock(),
+            pool=Mock(),
+            app=self.app,
+            timer=Mock(),
+            controller=Mock(),
+            hub=None if no_hub else Mock(),
+            **kwargs
+        )
+        consumer.blueprint = Mock()
+        consumer._restart_state = Mock()
+        consumer.connection = Mock()
+        consumer.connection_errors = (socket.error, OSError, )
+        return consumer
+
+    def test_taskbuckets_defaultdict(self):
+        c = self.get_consumer()
+        self.assertIsNone(c.task_buckets['fooxasdwx.wewe'])
+
+    def test_dump_body_buffer(self):
+        msg = Mock()
+        msg.body = 'str'
+        try:
+            buf = buffer(msg.body)
+        except NameError:
+            raise SkipTest('buffer type not available')
+        self.assertTrue(dump_body(msg, buf))
+
+    def test_sets_heartbeat(self):
+        c = self.get_consumer(amqheartbeat=10)
+        self.assertEqual(c.amqheartbeat, 10)
+        self.app.conf.BROKER_HEARTBEAT = 20
+        c = self.get_consumer(amqheartbeat=None)
+        self.assertEqual(c.amqheartbeat, 20)
+
+    def test_gevent_bug_disables_connection_timeout(self):
+        with patch('celery.worker.consumer._detect_environment') as de:
+            de.return_value = 'gevent'
+            self.app.conf.BROKER_CONNECTION_TIMEOUT = 33.33
+            self.get_consumer()
+            self.assertIsNone(self.app.conf.BROKER_CONNECTION_TIMEOUT)
+
+    def test_limit_task(self):
+        c = self.get_consumer()
+
+        with patch('celery.worker.consumer.task_reserved') as reserved:
+            bucket = Mock()
+            request = Mock()
+            bucket.can_consume.return_value = True
+
+            c._limit_task(request, bucket, 3)
+            bucket.can_consume.assert_called_with(3)
+            reserved.assert_called_with(request)
+            c.on_task_request.assert_called_with(request)
+
+        with patch('celery.worker.consumer.task_reserved') as reserved:
+            bucket.can_consume.return_value = False
+            bucket.expected_time.return_value = 3.33
+            c._limit_task(request, bucket, 4)
+            bucket.can_consume.assert_called_with(4)
+            c.timer.call_after.assert_called_with(
+                3.33, c._limit_task, (request, bucket, 4),
+            )
+            bucket.expected_time.assert_called_with(4)
+            self.assertFalse(reserved.called)
+
+    def test_start_blueprint_raises_EMFILE(self):
+        c = self.get_consumer()
+        exc = 
c.blueprint.start.side_effect = OSError() + exc.errno = errno.EMFILE + + with self.assertRaises(OSError): + c.start() + + def test_max_restarts_exceeded(self): + c = self.get_consumer() + + def se(*args, **kwargs): + c.blueprint.state = CLOSE + raise RestartFreqExceeded() + c._restart_state.step.side_effect = se + c.blueprint.start.side_effect = socket.error() + + with patch('celery.worker.consumer.sleep') as sleep: + c.start() + sleep.assert_called_with(1) + + def _closer(self, c): + def se(*args, **kwargs): + c.blueprint.state = CLOSE + return se + + def test_collects_at_restart(self): + c = self.get_consumer() + c.connection.collect.side_effect = MemoryError() + c.blueprint.start.side_effect = socket.error() + c.blueprint.restart.side_effect = self._closer(c) + c.start() + c.connection.collect.assert_called_with() + + def test_register_with_event_loop(self): + c = self.get_consumer() + c.register_with_event_loop(Mock(name='loop')) + + def test_on_close_clears_semaphore_timer_and_reqs(self): + with patch('celery.worker.consumer.reserved_requests') as reserved: + c = self.get_consumer() + c.on_close() + c.controller.semaphore.clear.assert_called_with() + c.timer.clear.assert_called_with() + reserved.clear.assert_called_with() + c.pool.flush.assert_called_with() + + c.controller = None + c.timer = None + c.pool = None + c.on_close() + + def test_connect_error_handler(self): + self.app.connection = Mock() + conn = self.app.connection.return_value = Mock() + c = self.get_consumer() + self.assertTrue(c.connect()) + self.assertTrue(conn.ensure_connection.called) + errback = conn.ensure_connection.call_args[0][0] + conn.alt = [(1, 2, 3)] + errback(Mock(), 0) + + +class test_Heart(AppCase): + + def test_start(self): + c = Mock() + c.timer = Mock() + c.event_dispatcher = Mock() + + with patch('celery.worker.heartbeat.Heart') as hcls: + h = Heart(c) + self.assertTrue(h.enabled) + self.assertIsNone(c.heart) + + h.start(c) + self.assertTrue(c.heart) + hcls.assert_called_with(c.timer, c.event_dispatcher) + c.heart.start.assert_called_with() + + +class test_Tasks(AppCase): + + def test_stop(self): + c = Mock() + tasks = Tasks(c) + self.assertIsNone(c.task_consumer) + self.assertIsNone(c.qos) + + c.task_consumer = Mock() + tasks.stop(c) + + def test_stop_already_stopped(self): + c = Mock() + tasks = Tasks(c) + tasks.stop(c) + + +class test_Agent(AppCase): + + def test_start(self): + c = Mock() + agent = Agent(c) + agent.instantiate = Mock() + agent.agent_cls = 'foo:Agent' + self.assertIsNotNone(agent.create(c)) + agent.instantiate.assert_called_with(agent.agent_cls, c.connection) + + +class test_Mingle(AppCase): + + def test_start_no_replies(self): + c = Mock() + mingle = Mingle(c) + I = c.app.control.inspect.return_value = Mock() + I.hello.return_value = {} + mingle.start(c) + + def test_start(self): + try: + c = Mock() + mingle = Mingle(c) + self.assertTrue(mingle.enabled) + + Aig = LimitedSet() + Big = LimitedSet() + Aig.add('Aig-1') + Aig.add('Aig-2') + Big.add('Big-1') + + I = c.app.control.inspect.return_value = Mock() + I.hello.return_value = { + 'A@example.com': { + 'clock': 312, + 'revoked': Aig._data, + }, + 'B@example.com': { + 'clock': 29, + 'revoked': Big._data, + }, + 'C@example.com': { + 'error': 'unknown method', + }, + } + + mingle.start(c) + I.hello.assert_called_with(c.hostname, worker_state.revoked._data) + c.app.clock.adjust.assert_has_calls([ + call(312), call(29), + ], any_order=True) + self.assertIn('Aig-1', worker_state.revoked) + self.assertIn('Aig-2', worker_state.revoked) + 
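
The clock assertions in this Mingle test (`c.app.clock.adjust.assert_has_calls([call(312), call(29)])`) exercise a Lamport-style logical clock, which kombu provides to celery apps. A simplified sketch of the adjust/forward semantics the test relies on (a model for illustration, not the vendored class):

    class LamportClock(object):
        """Logical clock: orders events by counter value, not wall time."""

        def __init__(self):
            self.value = 0

        def adjust(self, other):
            # On receiving a remote timestamp, jump past both clocks.
            self.value = max(self.value, other) + 1
            return self.value

        def forward(self):
            # A local event just ticks the counter.
            self.value += 1
            return self.value

The same semantics explain the `hello` handler test further down, where a panel clock of 313 comes back as 314: replying counts as a local event, so the clock ticks forward once.
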
self.assertIn('Big-1', worker_state.revoked) + finally: + worker_state.revoked.clear() + + +class test_Gossip(AppCase): + + def test_init(self): + c = self.Consumer() + g = Gossip(c) + self.assertTrue(g.enabled) + self.assertIs(c.gossip, g) + + def test_election(self): + c = self.Consumer() + g = Gossip(c) + g.start(c) + g.election('id', 'topic', 'action') + self.assertListEqual(g.consensus_replies['id'], []) + g.dispatcher.send.assert_called_with( + 'worker-elect', id='id', topic='topic', cver=1, action='action', + ) + + def test_call_task(self): + c = self.Consumer() + g = Gossip(c) + g.start(c) + + with patch('celery.worker.consumer.signature') as signature: + sig = signature.return_value = Mock() + task = Mock() + g.call_task(task) + signature.assert_called_with(task, app=c.app) + sig.apply_async.assert_called_with() + + sig.apply_async.side_effect = MemoryError() + with patch('celery.worker.consumer.error') as error: + g.call_task(task) + self.assertTrue(error.called) + + def Event(self, id='id', clock=312, + hostname='foo@example.com', pid=4312, + topic='topic', action='action', cver=1): + return { + 'id': id, + 'clock': clock, + 'hostname': hostname, + 'pid': pid, + 'topic': topic, + 'action': action, + 'cver': cver, + } + + def test_on_elect(self): + c = self.Consumer() + g = Gossip(c) + g.start(c) + + event = self.Event('id1') + g.on_elect(event) + in_heap = g.consensus_requests['id1'] + self.assertTrue(in_heap) + g.dispatcher.send.assert_called_with('worker-elect-ack', id='id1') + + event.pop('clock') + with patch('celery.worker.consumer.error') as error: + g.on_elect(event) + self.assertTrue(error.called) + + def Consumer(self, hostname='foo@x.com', pid=4312): + c = Mock() + c.hostname = hostname + c.pid = pid + return c + + def setup_election(self, g, c): + g.start(c) + g.clock = self.app.clock + self.assertNotIn('idx', g.consensus_replies) + self.assertIsNone(g.on_elect_ack({'id': 'idx'})) + + g.state.alive_workers.return_value = [ + 'foo@x.com', 'bar@x.com', 'baz@x.com', + ] + g.consensus_replies['id1'] = [] + g.consensus_requests['id1'] = [] + e1 = self.Event('id1', 1, 'foo@x.com') + e2 = self.Event('id1', 2, 'bar@x.com') + e3 = self.Event('id1', 3, 'baz@x.com') + g.on_elect(e1) + g.on_elect(e2) + g.on_elect(e3) + self.assertEqual(len(g.consensus_requests['id1']), 3) + + with patch('celery.worker.consumer.info'): + g.on_elect_ack(e1) + self.assertEqual(len(g.consensus_replies['id1']), 1) + g.on_elect_ack(e2) + self.assertEqual(len(g.consensus_replies['id1']), 2) + g.on_elect_ack(e3) + with self.assertRaises(KeyError): + g.consensus_replies['id1'] + + def test_on_elect_ack_win(self): + c = self.Consumer(hostname='foo@x.com') # I will win + g = Gossip(c) + handler = g.election_handlers['topic'] = Mock() + self.setup_election(g, c) + handler.assert_called_with('action') + + def test_on_elect_ack_lose(self): + c = self.Consumer(hostname='bar@x.com') # I will lose + g = Gossip(c) + handler = g.election_handlers['topic'] = Mock() + self.setup_election(g, c) + self.assertFalse(handler.called) + + def test_on_elect_ack_win_but_no_action(self): + c = self.Consumer(hostname='foo@x.com') # I will win + g = Gossip(c) + g.election_handlers = {} + with patch('celery.worker.consumer.error') as error: + self.setup_election(g, c) + self.assertTrue(error.called) + + def test_on_node_join(self): + c = self.Consumer() + g = Gossip(c) + with patch('celery.worker.consumer.info') as info: + g.on_node_join(c) + info.assert_called_with('%s joined the party', 'foo@x.com') + + def 
test_on_node_leave(self): + c = self.Consumer() + g = Gossip(c) + with patch('celery.worker.consumer.info') as info: + g.on_node_leave(c) + info.assert_called_with('%s left', 'foo@x.com') + + def test_on_node_lost(self): + c = self.Consumer() + g = Gossip(c) + with patch('celery.worker.consumer.warn') as warn: + g.on_node_lost(c) + warn.assert_called_with('%s went missing!', 'foo@x.com') + + def test_register_timer(self): + c = self.Consumer() + g = Gossip(c) + g.register_timer() + c.timer.call_repeatedly.assert_called_with(g.interval, g.periodic) + tref = g._tref + g.register_timer() + tref.cancel.assert_called_with() + + def test_periodic(self): + c = self.Consumer() + g = Gossip(c) + g.on_node_lost = Mock() + state = g.state = Mock() + worker = Mock() + state.workers = {'foo': worker} + worker.alive = True + worker.hostname = 'foo' + g.periodic() + + worker.alive = False + g.periodic() + g.on_node_lost.assert_called_with(worker) + with self.assertRaises(KeyError): + state.workers['foo'] + + def test_on_message(self): + c = self.Consumer() + g = Gossip(c) + prepare = Mock() + prepare.return_value = 'worker-online', {} + g.update_state = Mock() + worker = Mock() + g.on_node_join = Mock() + g.on_node_leave = Mock() + g.update_state.return_value = worker, 1 + message = Mock() + message.delivery_info = {'routing_key': 'worker-online'} + message.headers = {'hostname': 'other'} + + handler = g.event_handlers['worker-online'] = Mock() + g.on_message(prepare, message) + handler.assert_called_with(message.payload) + g.event_handlers = {} + + g.on_message(prepare, message) + g.on_node_join.assert_called_with(worker) + + message.delivery_info = {'routing_key': 'worker-offline'} + prepare.return_value = 'worker-offline', {} + g.on_message(prepare, message) + g.on_node_leave.assert_called_with(worker) + + message.delivery_info = {'routing_key': 'worker-baz'} + prepare.return_value = 'worker-baz', {} + g.update_state.return_value = worker, 0 + g.on_message(prepare, message) + + g.on_node_leave.reset_mock() + message.headers = {'hostname': g.hostname} + g.on_message(prepare, message) + self.assertFalse(g.on_node_leave.called) + g.clock.forward.assert_called_with() diff --git a/awx/lib/site-packages/celery/tests/worker/test_control.py b/awx/lib/site-packages/celery/tests/worker/test_control.py index b743586bba..c47be9103d 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_control.py +++ b/awx/lib/site-packages/celery/tests/worker/test_control.py @@ -1,67 +1,132 @@ from __future__ import absolute_import -from __future__ import with_statement import sys import socket +from collections import defaultdict from datetime import datetime, timedelta from kombu import pidbox -from mock import Mock, patch -from celery import current_app from celery.datastructures import AttributeDict -from celery.task import task +from celery.five import Queue as FastQueue from celery.utils import uuid from celery.utils.timer2 import Timer from celery.worker import WorkController as _WC from celery.worker import consumer from celery.worker import control -from celery.worker import state -from celery.worker.buckets import FastQueue -from celery.worker.job import TaskRequest +from celery.worker import state as worker_state +from celery.worker.job import Request from celery.worker.state import revoked from celery.worker.control import Panel -from celery.tests.utils import Case +from celery.worker.pidbox import Pidbox, gPidbox + +from celery.tests.case import AppCase, Mock, call, patch hostname = socket.gethostname() 
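
The control-panel tests below all go through `Panel.data`, a registry of handler functions dispatched by the `method` field of a pidbox message. A minimal sketch of that dispatch shape, with illustrative names (`handlers`, `register`, `dispatch`) rather than the vendored API:

    handlers = {}

    def register(name):
        def wrap(fun):
            handlers[name] = fun
            return fun
        return wrap

    @register('ping')
    def ping(state, **kwargs):
        return {'ok': 'pong'}

    def dispatch(state, message):
        # Look up the handler named by the message and pass its arguments.
        handler = handlers[message['method']]
        return handler(state, **message.get('arguments', {}))

The `{'ok': 'pong'}` reply shape matches what the updated `test_ping` below asserts; the old API returned the bare string 'pong'.
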
-@task(rate_limit=200) # for extra info in dump_tasks -def mytask(): - pass - - class WorkController(object): autoscaler = None + def stats(self): + return {'total': worker_state.total_count} + class Consumer(consumer.Consumer): - def __init__(self): - self.ready_queue = FastQueue() + def __init__(self, app): + self.app = app + self.buffer = FastQueue() + self.handle_task = self.buffer.put self.timer = Timer() - self.app = current_app self.event_dispatcher = Mock() self.controller = WorkController() self.task_consumer = Mock() + self.prefetch_multiplier = 1 + self.initial_prefetch_count = 1 from celery.concurrency.base import BasePool self.pool = BasePool(10) - - @property - def info(self): - return {'xyz': 'XYZ'} + self.task_buckets = defaultdict(lambda: None) -class test_ControlPanel(Case): +class test_Pidbox(AppCase): - def setUp(self): - self.app = current_app - self.panel = self.create_panel(consumer=Consumer()) + def test_shutdown(self): + with patch('celery.worker.pidbox.ignore_errors') as eig: + parent = Mock() + pbox = Pidbox(parent) + pbox._close_channel = Mock() + self.assertIs(pbox.c, parent) + pconsumer = pbox.consumer = Mock() + cancel = pconsumer.cancel + pbox.shutdown(parent) + eig.assert_called_with(parent, cancel) + pbox._close_channel.assert_called_with(parent) + + +class test_Pidbox_green(AppCase): + + def test_stop(self): + parent = Mock() + g = gPidbox(parent) + stopped = g._node_stopped = Mock() + shutdown = g._node_shutdown = Mock() + close_chan = g._close_channel = Mock() + + g.stop(parent) + shutdown.set.assert_called_with() + stopped.wait.assert_called_with() + close_chan.assert_called_with(parent) + self.assertIsNone(g._node_stopped) + self.assertIsNone(g._node_shutdown) + + close_chan.reset() + g.stop(parent) + close_chan.assert_called_with(parent) + + def test_resets(self): + parent = Mock() + g = gPidbox(parent) + g._resets = 100 + g.reset() + self.assertEqual(g._resets, 101) + + def test_loop(self): + parent = Mock() + conn = parent.connect.return_value = self.app.connection() + drain = conn.drain_events = Mock() + g = gPidbox(parent) + parent.connection = Mock() + do_reset = g._do_reset = Mock() + + call_count = [0] + + def se(*args, **kwargs): + if call_count[0] > 2: + g._node_shutdown.set() + g.reset() + call_count[0] += 1 + drain.side_effect = se + g.loop(parent) + + self.assertEqual(do_reset.call_count, 4) + + +class test_ControlPanel(AppCase): + + def setup(self): + self.panel = self.create_panel(consumer=Consumer(self.app)) + + @self.app.task(rate_limit=200, shared=False) + def mytask(): + pass + self.mytask = mytask def create_state(self, **kwargs): kwargs.setdefault('app', self.app) + kwargs.setdefault('hostname', hostname) return AttributeDict(kwargs) def create_panel(self, **kwargs): @@ -70,31 +135,63 @@ class test_ControlPanel(Case): handlers=Panel.data) def test_enable_events(self): - consumer = Consumer() + consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) - consumer.event_dispatcher.enabled = False + evd = consumer.event_dispatcher + evd.groups = set() panel.handle('enable_events') - self.assertTrue(consumer.event_dispatcher.enable.call_count) - self.assertIn( - ('worker-online', ), - consumer.event_dispatcher.send.call_args, - ) - consumer.event_dispatcher.enabled = True + self.assertIn('task', evd.groups) + evd.groups = set(['task']) self.assertIn('already enabled', panel.handle('enable_events')['ok']) def test_disable_events(self): - consumer = Consumer() + consumer = Consumer(self.app) panel = 
self.create_panel(consumer=consumer) - consumer.event_dispatcher.enabled = True + evd = consumer.event_dispatcher + evd.enabled = True + evd.groups = set(['task']) panel.handle('disable_events') - self.assertTrue(consumer.event_dispatcher.disable.call_count) - self.assertIn(('worker-offline', ), - consumer.event_dispatcher.send.call_args) - consumer.event_dispatcher.enabled = False + self.assertNotIn('task', evd.groups) self.assertIn('already disabled', panel.handle('disable_events')['ok']) + def test_clock(self): + consumer = Consumer(self.app) + panel = self.create_panel(consumer=consumer) + panel.state.app.clock.value = 313 + x = panel.handle('clock') + self.assertEqual(x['clock'], 313) + + def test_hello(self): + consumer = Consumer(self.app) + panel = self.create_panel(consumer=consumer) + panel.state.app.clock.value = 313 + worker_state.revoked.add('revoked1') + try: + x = panel.handle('hello', {'from_node': 'george@vandelay.com'}) + self.assertIn('revoked1', x['revoked']) + self.assertEqual(x['clock'], 314) # incremented + finally: + worker_state.revoked.discard('revoked1') + + def test_conf(self): + return + consumer = Consumer(self.app) + panel = self.create_panel(consumer=consumer) + self.app.conf.SOME_KEY6 = 'hello world' + x = panel.handle('dump_conf') + self.assertIn('SOME_KEY6', x) + + def test_election(self): + consumer = Consumer(self.app) + panel = self.create_panel(consumer=consumer) + consumer.gossip = Mock() + panel.handle( + 'election', {'id': 'id', 'topic': 'topic', 'action': 'action'}, + ) + consumer.gossip.election.assert_called_with('id', 'topic', 'action') + def test_heartbeat(self): - consumer = Consumer() + consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) consumer.event_dispatcher.enabled = True panel.handle('heartbeat') @@ -103,29 +200,29 @@ class test_ControlPanel(Case): def test_time_limit(self): panel = self.create_panel(consumer=Mock()) - th, ts = mytask.time_limit, mytask.soft_time_limit - try: - r = panel.handle('time_limit', arguments=dict( - task_name=mytask.name, hard=30, soft=10)) - self.assertEqual((mytask.time_limit, mytask.soft_time_limit), - (30, 10)) - self.assertIn('ok', r) - r = panel.handle('time_limit', arguments=dict( - task_name=mytask.name, hard=None, soft=None)) - self.assertEqual((mytask.time_limit, mytask.soft_time_limit), - (None, None)) - self.assertIn('ok', r) + r = panel.handle('time_limit', arguments=dict( + task_name=self.mytask.name, hard=30, soft=10)) + self.assertEqual( + (self.mytask.time_limit, self.mytask.soft_time_limit), + (30, 10), + ) + self.assertIn('ok', r) + r = panel.handle('time_limit', arguments=dict( + task_name=self.mytask.name, hard=None, soft=None)) + self.assertEqual( + (self.mytask.time_limit, self.mytask.soft_time_limit), + (None, None), + ) + self.assertIn('ok', r) - r = panel.handle('time_limit', arguments=dict( - task_name='248e8afya9s8dh921eh928', hard=30)) - self.assertIn('error', r) - finally: - mytask.time_limit, mytask.soft_time_limit = th, ts + r = panel.handle('time_limit', arguments=dict( + task_name='248e8afya9s8dh921eh928', hard=30)) + self.assertIn('error', r) def test_active_queues(self): import kombu - x = kombu.Consumer(current_app.connection(), + x = kombu.Consumer(self.app.connection(), [kombu.Queue('foo', kombu.Exchange('foo'), 'foo'), kombu.Queue('bar', kombu.Exchange('bar'), 'bar')], auto_declare=False) @@ -142,28 +239,28 @@ class test_ControlPanel(Case): self.assertIn('rate_limit=200', info) def test_stats(self): - prev_count, state.total_count = 
state.total_count, 100 + prev_count, worker_state.total_count = worker_state.total_count, 100 try: - self.assertDictContainsSubset({'total': 100, - 'consumer': {'xyz': 'XYZ'}}, + self.assertDictContainsSubset({'total': 100}, self.panel.handle('stats')) - self.panel.state.consumer = Mock() - self.panel.handle('stats') - self.assertTrue( - self.panel.state.consumer.controller.autoscaler.info.called) finally: - state.total_count = prev_count + worker_state.total_count = prev_count def test_report(self): self.panel.handle('report') def test_active(self): - r = TaskRequest(mytask.name, 'do re mi', (), {}) - state.active_requests.add(r) + r = Request({ + 'task': self.mytask.name, + 'id': 'do re mi', + 'args': (), + 'kwargs': {}, + }, app=self.app) + worker_state.active_requests.add(r) try: self.assertTrue(self.panel.handle('dump_active')) finally: - state.active_requests.discard(r) + worker_state.active_requests.discard(r) def test_pool_grow(self): @@ -178,14 +275,24 @@ class test_ControlPanel(Case): def shrink(self, n=1): self.size -= n - consumer = Consumer() - consumer.pool = MockPool() + @property + def num_processes(self): + return self.size + + consumer = Consumer(self.app) + consumer.prefetch_multiplier = 8 + consumer.qos = Mock(name='qos') + consumer.pool = MockPool(1) panel = self.create_panel(consumer=consumer) panel.handle('pool_grow') self.assertEqual(consumer.pool.size, 2) + consumer.qos.increment_eventually.assert_called_with(8) + self.assertEqual(consumer.initial_prefetch_count, 16) panel.handle('pool_shrink') self.assertEqual(consumer.pool.size, 1) + consumer.qos.decrement_eventually.assert_called_with(8) + self.assertEqual(consumer.initial_prefetch_count, 8) panel.state.consumer = Mock() panel.state.consumer.controller = Mock() @@ -214,7 +321,7 @@ class test_ControlPanel(Case): def consuming_from(self, queue): return queue in self.queues - consumer = Consumer() + consumer = Consumer(self.app) consumer.task_consumer = MockConsumer() panel = self.create_panel(consumer=consumer) @@ -226,58 +333,56 @@ class test_ControlPanel(Case): self.assertIn('MyQueue', consumer.task_consumer.cancelled) def test_revoked(self): - state.revoked.clear() - state.revoked.add('a1') - state.revoked.add('a2') + worker_state.revoked.clear() + worker_state.revoked.add('a1') + worker_state.revoked.add('a2') try: self.assertEqual(sorted(self.panel.handle('dump_revoked')), ['a1', 'a2']) finally: - state.revoked.clear() + worker_state.revoked.clear() def test_dump_schedule(self): - consumer = Consumer() + consumer = Consumer(self.app) panel = self.create_panel(consumer=consumer) self.assertFalse(panel.handle('dump_schedule')) - r = TaskRequest(mytask.name, 'CAFEBABE', (), {}) - consumer.timer.schedule.enter( + r = Request({ + 'task': self.mytask.name, + 'id': 'CAFEBABE', + 'args': (), + 'kwargs': {}, + }, app=self.app) + consumer.timer.schedule.enter_at( consumer.timer.Entry(lambda x: x, (r, )), datetime.now() + timedelta(seconds=10)) + consumer.timer.schedule.enter_at( + consumer.timer.Entry(lambda x: x, (object(), )), + datetime.now() + timedelta(seconds=10)) self.assertTrue(panel.handle('dump_schedule')) def test_dump_reserved(self): - from celery.worker import state - consumer = Consumer() - state.reserved_requests.add( - TaskRequest(mytask.name, uuid(), args=(2, 2), kwargs={}), - ) + consumer = Consumer(self.app) + worker_state.reserved_requests.add(Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': (2, 2), + 'kwargs': {}, + }, app=self.app)) try: panel = self.create_panel(consumer=consumer) 
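
The grow/shrink assertions that follow tie pool size to the consumer prefetch count: with `prefetch_multiplier = 8`, growing a 1-process pool yields `increment_eventually(8)` and an initial prefetch count of 16, and shrinking reverses both. A sketch of that bookkeeping under those assumptions (an illustrative helper, not the vendored method):

    def update_prefetch_count(consumer, n=0):
        # Adjust the in-flight QoS by the delta...
        if n > 0:
            consumer.qos.increment_eventually(
                n * consumer.prefetch_multiplier)
        elif n < 0:
            consumer.qos.decrement_eventually(
                -n * consumer.prefetch_multiplier)
        # ...and keep the initial prefetch proportional to pool size
        # (2 procs * multiplier 8 -> 16; 1 proc * 8 -> 8, as asserted below).
        consumer.initial_prefetch_count = (
            consumer.pool.num_processes * consumer.prefetch_multiplier
        )
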
response = panel.handle('dump_reserved', {'safe': True}) self.assertDictContainsSubset( - {'name': mytask.name, + {'name': self.mytask.name, 'args': (2, 2), 'kwargs': {}, 'hostname': socket.gethostname()}, response[0], ) - state.reserved_requests.clear() + worker_state.reserved_requests.clear() self.assertFalse(panel.handle('dump_reserved')) finally: - state.reserved_requests.clear() - - def test_rate_limit_when_disabled(self): - app = current_app - app.conf.CELERY_DISABLE_RATE_LIMITS = True - try: - e = self.panel.handle( - 'rate_limit', - arguments={'task_name': mytask.name, - 'rate_limit': '100/m'}) - self.assertIn('rate limits disabled', e.get('error')) - finally: - app.conf.CELERY_DISABLE_RATE_LIMITS = False + worker_state.reserved_requests.clear() def test_rate_limit_invalid_rate_limit_string(self): e = self.panel.handle('rate_limit', arguments=dict( @@ -286,34 +391,25 @@ class test_ControlPanel(Case): def test_rate_limit(self): - class Consumer(object): + class xConsumer(object): + reset = False - class ReadyQueue(object): - fresh = False + def reset_rate_limits(self): + self.reset = True - def refresh(self): - self.fresh = True + consumer = xConsumer() + panel = self.create_panel(app=self.app, consumer=consumer) - def __init__(self): - self.ready_queue = self.ReadyQueue() - - consumer = Consumer() - panel = self.create_panel(app=current_app, consumer=consumer) - - task = current_app.tasks[mytask.name] - old_rate_limit = task.rate_limit - try: - panel.handle('rate_limit', arguments=dict(task_name=task.name, - rate_limit='100/m')) - self.assertEqual(task.rate_limit, '100/m') - self.assertTrue(consumer.ready_queue.fresh) - consumer.ready_queue.fresh = False - panel.handle('rate_limit', arguments=dict(task_name=task.name, - rate_limit=0)) - self.assertEqual(task.rate_limit, 0) - self.assertTrue(consumer.ready_queue.fresh) - finally: - task.rate_limit = old_rate_limit + task = self.app.tasks[self.mytask.name] + panel.handle('rate_limit', arguments=dict(task_name=task.name, + rate_limit='100/m')) + self.assertEqual(task.rate_limit, '100/m') + self.assertTrue(consumer.reset) + consumer.reset = False + panel.handle('rate_limit', arguments=dict(task_name=task.name, + rate_limit=0)) + self.assertEqual(task.rate_limit, 0) + self.assertTrue(consumer.reset) def test_rate_limit_nonexistant_task(self): self.panel.handle('rate_limit', arguments={ @@ -329,7 +425,7 @@ class test_ControlPanel(Case): m = {'method': 'revoke', 'destination': hostname, 'arguments': {'task_id': tid, - 'task_name': mytask.name}} + 'task_name': self.mytask.name}} self.panel.handle_message(m, None) self.assertIn(tid, revoked) @@ -359,17 +455,17 @@ class test_ControlPanel(Case): def test_revoke_terminate(self): request = Mock() request.id = tid = uuid() - state.reserved_requests.add(request) + worker_state.reserved_requests.add(request) try: r = control.revoke(Mock(), tid, terminate=True) self.assertIn(tid, revoked) self.assertTrue(request.terminate.call_count) - self.assertIn('terminating', r['ok']) + self.assertIn('terminate:', r['ok']) # unknown task id only revokes r = control.revoke(Mock(), uuid(), terminate=True) - self.assertIn('not found', r['ok']) + self.assertIn('tasks unknown', r['ok']) finally: - state.reserved_requests.discard(request) + worker_state.reserved_requests.discard(request) def test_autoscale(self): self.panel.state.consumer = Mock() @@ -390,7 +486,7 @@ class test_ControlPanel(Case): m = {'method': 'ping', 'destination': hostname} r = self.panel.handle_message(m, None) - self.assertEqual(r, 'pong') + 
self.assertEqual(r, {'ok': 'pong'}) def test_shutdown(self): m = {'method': 'shutdown', @@ -408,17 +504,17 @@ class test_ControlPanel(Case): replies.append(data) panel = _Node(hostname=hostname, - state=self.create_state(consumer=Consumer()), + state=self.create_state(consumer=Consumer(self.app)), handlers=Panel.data, mailbox=self.app.control.mailbox) r = panel.dispatch('ping', reply_to={'exchange': 'x', 'routing_key': 'x'}) - self.assertEqual(r, 'pong') - self.assertDictEqual(replies[0], {panel.hostname: 'pong'}) + self.assertEqual(r, {'ok': 'pong'}) + self.assertDictEqual(replies[0], {panel.hostname: {'ok': 'pong'}}) def test_pool_restart(self): - consumer = Consumer() - consumer.controller = _WC(app=current_app) + consumer = Consumer(self.app) + consumer.controller = _WC(app=self.app) consumer.controller.pool.restart = Mock() panel = self.create_panel(consumer=consumer) panel.app = self.app @@ -428,67 +524,59 @@ class test_ControlPanel(Case): with self.assertRaises(ValueError): panel.handle('pool_restart', {'reloader': _reload}) - current_app.conf.CELERYD_POOL_RESTARTS = True - try: - panel.handle('pool_restart', {'reloader': _reload}) - self.assertTrue(consumer.controller.pool.restart.called) - self.assertFalse(_reload.called) - self.assertFalse(_import.called) - finally: - current_app.conf.CELERYD_POOL_RESTARTS = False + self.app.conf.CELERYD_POOL_RESTARTS = True + panel.handle('pool_restart', {'reloader': _reload}) + self.assertTrue(consumer.controller.pool.restart.called) + self.assertFalse(_reload.called) + self.assertFalse(_import.called) def test_pool_restart_import_modules(self): - consumer = Consumer() - consumer.controller = _WC(app=current_app) + consumer = Consumer(self.app) + consumer.controller = _WC(app=self.app) consumer.controller.pool.restart = Mock() panel = self.create_panel(consumer=consumer) panel.app = self.app _import = consumer.controller.app.loader.import_from_cwd = Mock() _reload = Mock() - current_app.conf.CELERYD_POOL_RESTARTS = True - try: - panel.handle('pool_restart', {'modules': ['foo', 'bar'], - 'reloader': _reload}) - self.assertTrue(consumer.controller.pool.restart.called) - self.assertFalse(_reload.called) - self.assertEqual( - [(('foo',), {}), (('bar',), {})], - _import.call_args_list, - ) - finally: - current_app.conf.CELERYD_POOL_RESTARTS = False + self.app.conf.CELERYD_POOL_RESTARTS = True + panel.handle('pool_restart', {'modules': ['foo', 'bar'], + 'reloader': _reload}) - def test_pool_restart_relaod_modules(self): - consumer = Consumer() - consumer.controller = _WC(app=current_app) + self.assertTrue(consumer.controller.pool.restart.called) + self.assertFalse(_reload.called) + self.assertItemsEqual( + [call('bar'), call('foo')], + _import.call_args_list, + ) + + def test_pool_restart_reload_modules(self): + consumer = Consumer(self.app) + consumer.controller = _WC(app=self.app) consumer.controller.pool.restart = Mock() panel = self.create_panel(consumer=consumer) panel.app = self.app _import = panel.app.loader.import_from_cwd = Mock() _reload = Mock() - current_app.conf.CELERYD_POOL_RESTARTS = True - try: - with patch.dict(sys.modules, {'foo': None}): - panel.handle('pool_restart', {'modules': ['foo'], - 'reload': False, - 'reloader': _reload}) + self.app.conf.CELERYD_POOL_RESTARTS = True + with patch.dict(sys.modules, {'foo': None}): + panel.handle('pool_restart', {'modules': ['foo'], + 'reload': False, + 'reloader': _reload}) - self.assertTrue(consumer.controller.pool.restart.called) - self.assertFalse(_reload.called) - 
self.assertFalse(_import.called) + self.assertTrue(consumer.controller.pool.restart.called) + self.assertFalse(_reload.called) + self.assertFalse(_import.called) - _import.reset_mock() - _reload.reset_mock() - consumer.controller.pool.restart.reset_mock() + _import.reset_mock() + _reload.reset_mock() + consumer.controller.pool.restart.reset_mock() - panel.handle('pool_restart', {'modules': ['foo'], - 'reload': True, - 'reloader': _reload}) + panel.handle('pool_restart', {'modules': ['foo'], + 'reload': True, + 'reloader': _reload}) - self.assertTrue(consumer.controller.pool.restart.called) - self.assertTrue(_reload.called) - self.assertFalse(_import.called) - finally: - current_app.conf.CELERYD_POOL_RESTARTS = False + self.assertTrue(consumer.controller.pool.restart.called) + self.assertTrue(_reload.called) + self.assertFalse(_import.called) diff --git a/awx/lib/site-packages/celery/tests/worker/test_heartbeat.py b/awx/lib/site-packages/celery/tests/worker/test_heartbeat.py index 1446ce65ac..5568e4ec4c 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_heartbeat.py +++ b/awx/lib/site-packages/celery/tests/worker/test_heartbeat.py @@ -1,7 +1,7 @@ from __future__ import absolute_import from celery.worker.heartbeat import Heart -from celery.tests.utils import Case, sleepdeprived +from celery.tests.case import AppCase class MockDispatcher(object): @@ -31,7 +31,7 @@ class MockDispatcherRaising(object): class MockTimer(object): - def apply_interval(self, msecs, fun, args=(), kwargs={}): + def call_repeatedly(self, secs, fun, args=(), kwargs={}): class entry(tuple): cancelled = False @@ -39,15 +39,15 @@ class MockTimer(object): def cancel(self): self.cancelled = True - return entry((msecs, fun, args, kwargs)) + return entry((secs, fun, args, kwargs)) def cancel(self, entry): entry.cancel() -class test_Heart(Case): +class test_Heart(AppCase): - def test_stop(self): + def test_start_stop(self): timer = MockTimer() eventer = MockDispatcher() h = Heart(timer, eventer, interval=1) @@ -57,16 +57,17 @@ class test_Heart(Case): self.assertIsNone(h.tref) h.stop() - @sleepdeprived - def test_run_manages_cycle(self): + def test_start_when_disabled(self): + timer = MockTimer() eventer = MockDispatcher() - heart = Heart(MockTimer(), eventer, interval=0.1) - eventer.heart = heart - heart.start() - msecs, fun, args, kwargs = tref = heart.tref - self.assertEqual(msecs, 0.1 * 1000) - self.assertEqual(tref.fun, eventer.send) - self.assertTrue(tref.args) - self.assertTrue(tref.kwargs) - heart.stop() - self.assertTrue(tref.cancelled) + eventer.enabled = False + h = Heart(timer, eventer) + h.start() + self.assertFalse(h.tref) + + def test_stop_when_disabled(self): + timer = MockTimer() + eventer = MockDispatcher() + eventer.enabled = False + h = Heart(timer, eventer) + h.stop() diff --git a/awx/lib/site-packages/celery/tests/worker/test_hub.py b/awx/lib/site-packages/celery/tests/worker/test_hub.py index e66ecb7a09..8a4328c6e3 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_hub.py +++ b/awx/lib/site-packages/celery/tests/worker/test_hub.py @@ -1,15 +1,11 @@ from __future__ import absolute_import -from __future__ import with_statement -from celery.worker.hub import ( - DummyLock, - BoundedSemaphore, - Hub, -) +from kombu.async import Hub, READ, WRITE, ERR +from kombu.async.hub import repr_flag, _rcb +from kombu.async.semaphore import DummyLock, LaxBoundedSemaphore -from mock import Mock, call, patch - -from celery.tests.utils import Case +from celery.five import range +from celery.tests.case import 
Case, Mock, call, patch class File(object): @@ -25,6 +21,9 @@ class File(object): return self.fd == other.fd return NotImplemented + def __hash__(self): + return hash(self.fd) + class test_DummyLock(Case): @@ -34,10 +33,10 @@ class test_DummyLock(Case): pass -class test_BoundedSemaphore(Case): +class test_LaxBoundedSemaphore(Case): def test_acquire_release(self): - x = BoundedSemaphore(2) + x = LaxBoundedSemaphore(2) c1 = Mock() x.acquire(c1, 1) @@ -59,13 +58,13 @@ class test_BoundedSemaphore(Case): c3.assert_called_with(3) def test_bounded(self): - x = BoundedSemaphore(2) - for i in xrange(100): + x = LaxBoundedSemaphore(2) + for i in range(100): x.release() self.assertEqual(x.value, 2) def test_grow_shrink(self): - x = BoundedSemaphore(1) + x = LaxBoundedSemaphore(1) self.assertEqual(x.initial_value, 1) cb1 = Mock() x.acquire(cb1, 1) @@ -89,24 +88,24 @@ class test_BoundedSemaphore(Case): self.assertFalse(x._waiting) x.grow(3) - for i in xrange(x.initial_value): + for i in range(x.initial_value): self.assertTrue(x.acquire(Mock())) self.assertFalse(x.acquire(Mock())) x.clear() x.shrink(3) - for i in xrange(x.initial_value): + for i in range(x.initial_value): self.assertTrue(x.acquire(Mock())) self.assertFalse(x.acquire(Mock())) self.assertEqual(x.value, 0) - for i in xrange(100): + for i in range(100): x.release() self.assertEqual(x.value, x.initial_value) def test_clear(self): - x = BoundedSemaphore(10) - for i in xrange(11): + x = LaxBoundedSemaphore(10) + for i in range(11): x.acquire(Mock()) self.assertTrue(x._waiting) self.assertEqual(x.value, 0) @@ -118,24 +117,32 @@ class test_BoundedSemaphore(Case): class test_Hub(Case): - @patch('kombu.utils.eventio.poll') + def test_repr_flag(self): + self.assertEqual(repr_flag(READ), 'R') + self.assertEqual(repr_flag(WRITE), 'W') + self.assertEqual(repr_flag(ERR), '!') + self.assertEqual(repr_flag(READ | WRITE), 'RW') + self.assertEqual(repr_flag(READ | ERR), 'R!') + self.assertEqual(repr_flag(WRITE | ERR), 'W!') + self.assertEqual(repr_flag(READ | WRITE | ERR), 'RW!') + + def test_repr_callback_rcb(self): + + def f(): + pass + + self.assertEqual(_rcb(f), f.__name__) + self.assertEqual(_rcb('foo'), 'foo') + + @patch('kombu.async.hub.poll') def test_start_stop(self, poll): hub = Hub() - hub.start() poll.assert_called_with() + poller = hub.poller hub.stop() - hub.poller.close.assert_called_with() - - def test_init(self): - hub = Hub() - cb1 = Mock() - cb2 = Mock() - hub.on_init.extend([cb1, cb2]) - - hub.init() - cb1.assert_called_with(hub) - cb2.assert_called_with(hub) + hub.close() + poller.close.assert_called_with() def test_fire_timers(self): hub = Hub() @@ -145,8 +152,7 @@ class test_Hub(Case): max_delay=32.321), 32.321) hub.timer._queue = [1] - hub.scheduler = Mock() - hub.scheduler.next.return_value = 3.743, None + hub.scheduler = iter([(3.743, None)]) self.assertEqual(hub.fire_timers(), 3.743) e1, e2, e3 = Mock(), Mock(), Mock() @@ -155,18 +161,18 @@ class test_Hub(Case): reset = lambda: [m.reset() for m in [e1, e2, e3]] def se(): - if entries: - return None, entries.pop() - return 3.982, None - hub.scheduler.next = Mock() - hub.scheduler.next.side_effect = se + while 1: + while entries: + yield None, entries.pop() + yield 3.982, None + hub.scheduler = se() self.assertEqual(hub.fire_timers(max_timers=10), 3.982) for E in [e3, e2, e1]: E.assert_called_with() reset() - entries[:] = [Mock() for _ in xrange(11)] + entries[:] = [Mock() for _ in range(11)] keep = list(entries) self.assertEqual(hub.fire_timers(max_timers=10, min_delay=1.13), 
1.13) for E in reversed(keep[1:]): @@ -175,28 +181,88 @@ class test_Hub(Case): self.assertEqual(hub.fire_timers(max_timers=10), 3.982) keep[0].assert_called_with() - def test_update_readers(self): + def test_fire_timers_raises(self): + hub = Hub() + eback = Mock() + eback.side_effect = KeyError('foo') + hub.timer = Mock() + hub.scheduler = iter([(0, eback)]) + with self.assertRaises(KeyError): + hub.fire_timers(propagate=(KeyError, )) + + eback.side_effect = ValueError('foo') + hub.scheduler = iter([(0, eback)]) + with patch('kombu.async.hub.logger') as logger: + with self.assertRaises(StopIteration): + hub.fire_timers() + self.assertTrue(logger.error.called) + + def test_add_raises_ValueError(self): + hub = Hub() + hub.poller = Mock(name='hub.poller') + hub.poller.register.side_effect = ValueError() + hub._discard = Mock(name='hub.discard') + with self.assertRaises(ValueError): + hub.add(2, Mock(), READ) + hub._discard.assert_called_with(2) + + def test_repr_active(self): + hub = Hub() + hub.readers = {1: Mock(), 2: Mock()} + hub.writers = {3: Mock(), 4: Mock()} + for value in list(hub.readers.values()) + list(hub.writers.values()): + value.__name__ = 'mock' + self.assertTrue(hub.repr_active()) + + def test_repr_events(self): + hub = Hub() + hub.readers = {6: Mock(), 7: Mock(), 8: Mock()} + hub.writers = {9: Mock()} + for value in list(hub.readers.values()) + list(hub.writers.values()): + value.__name__ = 'mock' + self.assertTrue(hub.repr_events([ + (6, READ), + (7, ERR), + (8, READ | ERR), + (9, WRITE), + (10, 13213), + ])) + + def test_callback_for(self): + hub = Hub() + reader, writer = Mock(), Mock() + hub.readers = {6: reader} + hub.writers = {7: writer} + + self.assertEqual(hub._callback_for(6, READ), reader) + self.assertEqual(hub._callback_for(7, WRITE), writer) + with self.assertRaises(KeyError): + hub._callback_for(6, WRITE) + self.assertEqual(hub._callback_for(6, WRITE, 'foo'), 'foo') + + def test_add_remove_readers(self): hub = Hub() P = hub.poller = Mock() read_A = Mock() read_B = Mock() - hub.update_readers({10: read_A, File(11): read_B}) + hub.add_reader(10, read_A, 10) + hub.add_reader(File(11), read_B, 11) P.register.assert_has_calls([ call(10, hub.READ | hub.ERR), call(File(11), hub.READ | hub.ERR), ], any_order=True) - self.assertIs(hub.readers[10], read_A) - self.assertIs(hub.readers[11], read_B) + self.assertEqual(hub.readers[10], (read_A, (10, ))) + self.assertEqual(hub.readers[11], (read_B, (11, ))) hub.remove(10) self.assertNotIn(10, hub.readers) hub.remove(File(11)) self.assertNotIn(11, hub.readers) P.unregister.assert_has_calls([ - call(10), call(File(11)), + call(10), call(11), ]) def test_can_remove_unknown_fds(self): @@ -212,49 +278,50 @@ class test_Hub(Case): hub.remove(313) - def test_update_writers(self): + def test_add_writers(self): hub = Hub() P = hub.poller = Mock() write_A = Mock() write_B = Mock() - hub.update_writers({20: write_A, File(21): write_B}) + hub.add_writer(20, write_A) + hub.add_writer(File(21), write_B) P.register.assert_has_calls([ call(20, hub.WRITE), call(File(21), hub.WRITE), ], any_order=True) - self.assertIs(hub.writers[20], write_A) - self.assertIs(hub.writers[21], write_B) + self.assertEqual(hub.writers[20], (write_A, ())) + self.assertEqual(hub.writers[21], (write_B, ())) hub.remove(20) self.assertNotIn(20, hub.writers) hub.remove(File(21)) self.assertNotIn(21, hub.writers) P.unregister.assert_has_calls([ - call(20), call(File(21)), + call(20), call(21), ]) def test_enter__exit(self): hub = Hub() P = hub.poller = Mock() - 
hub.init = Mock() - on_close = Mock() - hub.on_close.append(on_close) - - with hub: - hub.init.assert_called_with() + hub.on_close.add(on_close) + try: read_A = Mock() read_B = Mock() - hub.update_readers({10: read_A, File(11): read_B}) + hub.add_reader(10, read_A) + hub.add_reader(File(11), read_B) write_A = Mock() write_B = Mock() - hub.update_writers({20: write_A, File(21): write_B}) + hub.add_writer(20, write_A) + hub.add_writer(File(21), write_B) self.assertTrue(hub.readers) self.assertTrue(hub.writers) + finally: + hub.close() self.assertFalse(hub.readers) self.assertFalse(hub.writers) diff --git a/awx/lib/site-packages/celery/tests/worker/test_loops.py b/awx/lib/site-packages/celery/tests/worker/test_loops.py new file mode 100644 index 0000000000..48653d50d0 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/worker/test_loops.py @@ -0,0 +1,403 @@ +from __future__ import absolute_import + +import socket + +from kombu.async import Hub, READ, WRITE, ERR + +from celery.bootsteps import CLOSE, RUN +from celery.exceptions import InvalidTaskError, SystemTerminate +from celery.five import Empty +from celery.worker import state +from celery.worker.consumer import Consumer +from celery.worker.loops import asynloop, synloop + +from celery.tests.case import AppCase, Mock, body_from_sig + + +class X(object): + + def __init__(self, app, heartbeat=None, on_task_message=None): + hub = Hub() + ( + self.obj, + self.connection, + self.consumer, + self.blueprint, + self.hub, + self.qos, + self.heartbeat, + self.clock, + ) = self.args = [Mock(name='obj'), + Mock(name='connection'), + Mock(name='consumer'), + Mock(name='blueprint'), + hub, + Mock(name='qos'), + heartbeat, + Mock(name='clock')] + self.connection.supports_heartbeats = True + self.consumer.callbacks = [] + self.obj.strategies = {} + self.connection.connection_errors = (socket.error, ) + self.hub.readers = {} + self.hub.writers = {} + self.hub.consolidate = set() + self.hub.timer = Mock(name='hub.timer') + self.hub.timer._queue = [Mock()] + self.hub.fire_timers = Mock(name='hub.fire_timers') + self.hub.fire_timers.return_value = 1.7 + self.hub.poller = Mock(name='hub.poller') + self.hub.close = Mock(name='hub.close()') # asynloop calls hub.close + self.Hub = self.hub + self.blueprint.state = RUN + # need this for create_task_handler + _consumer = Consumer(Mock(), timer=Mock(), app=app) + _consumer.on_task_message = on_task_message or [] + self.obj.create_task_handler = _consumer.create_task_handler + self.on_unknown_message = self.obj.on_unknown_message = Mock( + name='on_unknown_message', + ) + _consumer.on_unknown_message = self.on_unknown_message + self.on_unknown_task = self.obj.on_unknown_task = Mock( + name='on_unknown_task', + ) + _consumer.on_unknown_task = self.on_unknown_task + self.on_invalid_task = self.obj.on_invalid_task = Mock( + name='on_invalid_task', + ) + _consumer.on_invalid_task = self.on_invalid_task + _consumer.strategies = self.obj.strategies + + def timeout_then_error(self, mock): + + def first(*args, **kwargs): + mock.side_effect = socket.error() + self.connection.more_to_read = False + raise socket.timeout() + mock.side_effect = first + + def close_then_error(self, mock=None, mod=0, exc=None): + mock = Mock() if mock is None else mock + + def first(*args, **kwargs): + if not mod or mock.call_count > mod: + self.close() + self.connection.more_to_read = False + raise (socket.error() if exc is None else exc) + mock.side_effect = first + return mock + + def close(self, *args, **kwargs): + self.blueprint.state = 
CLOSE + + def closer(self, mock=None, mod=0): + mock = Mock() if mock is None else mock + + def closing(*args, **kwargs): + if not mod or mock.call_count >= mod: + self.close() + mock.side_effect = closing + return mock + + +def get_task_callback(*args, **kwargs): + x = X(*args, **kwargs) + x.blueprint.state = CLOSE + asynloop(*x.args) + return x, x.consumer.callbacks[0] + + +class test_asynloop(AppCase): + + def setup(self): + + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + + def test_setup_heartbeat(self): + x = X(self.app, heartbeat=10) + x.hub.call_repeatedly = Mock(name='x.hub.call_repeatedly()') + x.blueprint.state = CLOSE + asynloop(*x.args) + x.consumer.consume.assert_called_with() + x.obj.on_ready.assert_called_with() + x.hub.call_repeatedly.assert_called_with( + 10 / 2.0, x.connection.heartbeat_check, 2.0, + ) + + def task_context(self, sig, **kwargs): + x, on_task = get_task_callback(self.app, **kwargs) + body = body_from_sig(self.app, sig) + message = Mock() + strategy = x.obj.strategies[sig.task] = Mock() + return x, on_task, body, message, strategy + + def test_on_task_received(self): + _, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) + on_task(body, msg) + strategy.assert_called_with( + msg, body, msg.ack_log_error, msg.reject_log_error, [], + ) + + def test_on_task_received_executes_on_task_message(self): + cbs = [Mock(), Mock(), Mock()] + _, on_task, body, msg, strategy = self.task_context( + self.add.s(2, 2), on_task_message=cbs, + ) + on_task(body, msg) + strategy.assert_called_with( + msg, body, msg.ack_log_error, msg.reject_log_error, cbs, + ) + + def test_on_task_message_missing_name(self): + x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) + body.pop('task') + on_task(body, msg) + x.on_unknown_message.assert_called_with(body, msg) + + def test_on_task_not_registered(self): + x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) + exc = strategy.side_effect = KeyError(self.add.name) + on_task(body, msg) + x.on_unknown_task.assert_called_with(body, msg, exc) + + def test_on_task_InvalidTaskError(self): + x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) + exc = strategy.side_effect = InvalidTaskError() + on_task(body, msg) + x.on_invalid_task.assert_called_with(body, msg, exc) + + def test_should_terminate(self): + x = X(self.app) + # XXX why aren't the errors propagated?!? + state.should_terminate = True + try: + with self.assertRaises(SystemTerminate): + asynloop(*x.args) + finally: + state.should_terminate = False + + def test_should_terminate_hub_close_raises(self): + x = X(self.app) + # XXX why aren't the errors propagated?!? 
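+        # Hedged sketch of the behaviour exercised by this test and the
+        # one above, inferred from their assertions rather than from
+        # asynloop's source: the loop is assumed to poll the shutdown
+        # flags roughly as
+        #
+        #     state.maybe_shutdown()  # raises SystemTerminate when
+        #                             # state.should_terminate is set
+        #
+        # (maybe_shutdown is exercised directly in test_state.py further
+        # down), and the MemoryError injected into hub.close() below is
+        # expected not to mask that SystemTerminate.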
+ state.should_terminate = True + x.hub.close.side_effect = MemoryError() + try: + with self.assertRaises(SystemTerminate): + asynloop(*x.args) + finally: + state.should_terminate = False + + def test_should_stop(self): + x = X(self.app) + state.should_stop = True + try: + with self.assertRaises(SystemExit): + asynloop(*x.args) + finally: + state.should_stop = False + + def test_updates_qos(self): + x = X(self.app) + x.qos.prev = 3 + x.qos.value = 3 + x.hub.on_tick.add(x.closer(mod=2)) + x.hub.timer._queue = [1] + asynloop(*x.args) + self.assertFalse(x.qos.update.called) + + x = X(self.app) + x.qos.prev = 1 + x.qos.value = 6 + x.hub.on_tick.add(x.closer(mod=2)) + asynloop(*x.args) + x.qos.update.assert_called_with() + x.hub.fire_timers.assert_called_with(propagate=(socket.error, )) + + def test_poll_empty(self): + x = X(self.app) + x.hub.readers = {6: Mock()} + x.hub.timer._queue = [1] + x.close_then_error(x.hub.poller.poll) + x.hub.fire_timers.return_value = 33.37 + x.hub.poller.poll.return_value = [] + with self.assertRaises(socket.error): + asynloop(*x.args) + x.hub.poller.poll.assert_called_with(33.37) + + def test_poll_readable(self): + x = X(self.app) + reader = Mock(name='reader') + x.hub.add_reader(6, reader, 6) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), mod=4)) + x.hub.poller.poll.return_value = [(6, READ)] + with self.assertRaises(socket.error): + asynloop(*x.args) + reader.assert_called_with(6) + self.assertTrue(x.hub.poller.poll.called) + + def test_poll_readable_raises_Empty(self): + x = X(self.app) + reader = Mock(name='reader') + x.hub.add_reader(6, reader, 6) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + x.hub.poller.poll.return_value = [(6, READ)] + reader.side_effect = Empty() + with self.assertRaises(socket.error): + asynloop(*x.args) + reader.assert_called_with(6) + self.assertTrue(x.hub.poller.poll.called) + + def test_poll_writable(self): + x = X(self.app) + writer = Mock(name='writer') + x.hub.add_writer(6, writer, 6) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + x.hub.poller.poll.return_value = [(6, WRITE)] + with self.assertRaises(socket.error): + asynloop(*x.args) + writer.assert_called_with(6) + self.assertTrue(x.hub.poller.poll.called) + + def test_poll_writable_none_registered(self): + x = X(self.app) + writer = Mock(name='writer') + x.hub.add_writer(6, writer, 6) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + x.hub.poller.poll.return_value = [(7, WRITE)] + with self.assertRaises(socket.error): + asynloop(*x.args) + self.assertTrue(x.hub.poller.poll.called) + + def test_poll_unknown_event(self): + x = X(self.app) + writer = Mock(name='reader') + x.hub.add_writer(6, writer, 6) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + x.hub.poller.poll.return_value = [(6, 0)] + with self.assertRaises(socket.error): + asynloop(*x.args) + self.assertTrue(x.hub.poller.poll.called) + + def test_poll_keep_draining_disabled(self): + x = X(self.app) + x.hub.writers = {6: Mock()} + poll = x.hub.poller.poll + + def se(*args, **kwargs): + poll.side_effect = socket.error() + poll.side_effect = se + + x.hub.poller.poll.return_value = [(6, 0)] + with self.assertRaises(socket.error): + asynloop(*x.args) + self.assertTrue(x.hub.poller.poll.called) + + def test_poll_err_writable(self): + x = X(self.app) + writer = Mock(name='writer') + x.hub.add_writer(6, writer, 6, 48) + x.hub.on_tick.add(x.close_then_error(Mock(), 2)) + x.hub.poller.poll.return_value = [(6, ERR)] + with self.assertRaises(socket.error): + 
asynloop(*x.args) + writer.assert_called_with(6, 48) + self.assertTrue(x.hub.poller.poll.called) + + def test_poll_write_generator(self): + x = X(self.app) + x.hub.remove = Mock(name='hub.remove()') + + def Gen(): + yield 1 + yield 2 + gen = Gen() + + x.hub.add_writer(6, gen) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + x.hub.poller.poll.return_value = [(6, WRITE)] + with self.assertRaises(socket.error): + asynloop(*x.args) + self.assertTrue(gen.gi_frame.f_lasti != -1) + self.assertFalse(x.hub.remove.called) + + def test_poll_write_generator_stopped(self): + x = X(self.app) + + def Gen(): + raise StopIteration() + yield + gen = Gen() + x.hub.add_writer(6, gen) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + x.hub.poller.poll.return_value = [(6, WRITE)] + x.hub.remove = Mock(name='hub.remove()') + with self.assertRaises(socket.error): + asynloop(*x.args) + self.assertIsNone(gen.gi_frame) + + def test_poll_write_generator_raises(self): + x = X(self.app) + + def Gen(): + raise ValueError('foo') + yield + gen = Gen() + x.hub.add_writer(6, gen) + x.hub.remove = Mock(name='hub.remove()') + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + x.hub.poller.poll.return_value = [(6, WRITE)] + with self.assertRaises(ValueError): + asynloop(*x.args) + self.assertIsNone(gen.gi_frame) + x.hub.remove.assert_called_with(6) + + def test_poll_err_readable(self): + x = X(self.app) + reader = Mock(name='reader') + x.hub.add_reader(6, reader, 6, 24) + x.hub.on_tick.add(x.close_then_error(Mock(), 2)) + x.hub.poller.poll.return_value = [(6, ERR)] + with self.assertRaises(socket.error): + asynloop(*x.args) + reader.assert_called_with(6, 24) + self.assertTrue(x.hub.poller.poll.called) + + def test_poll_raises_ValueError(self): + x = X(self.app) + x.hub.readers = {6: Mock()} + x.close_then_error(x.hub.poller.poll, exc=ValueError) + asynloop(*x.args) + self.assertTrue(x.hub.poller.poll.called) + + +class test_synloop(AppCase): + + def test_timeout_ignored(self): + x = X(self.app) + x.timeout_then_error(x.connection.drain_events) + with self.assertRaises(socket.error): + synloop(*x.args) + self.assertEqual(x.connection.drain_events.call_count, 2) + + def test_updates_qos_when_changed(self): + x = X(self.app) + x.qos.prev = 2 + x.qos.value = 2 + x.timeout_then_error(x.connection.drain_events) + with self.assertRaises(socket.error): + synloop(*x.args) + self.assertFalse(x.qos.update.called) + + x.qos.value = 4 + x.timeout_then_error(x.connection.drain_events) + with self.assertRaises(socket.error): + synloop(*x.args) + x.qos.update.assert_called_with() + + def test_ignores_socket_errors_when_closed(self): + x = X(self.app) + x.close_then_error(x.connection.drain_events) + self.assertIsNone(synloop(*x.args)) diff --git a/awx/lib/site-packages/celery/tests/worker/test_mediator.py b/awx/lib/site-packages/celery/tests/worker/test_mediator.py deleted file mode 100644 index 66511f6162..0000000000 --- a/awx/lib/site-packages/celery/tests/worker/test_mediator.py +++ /dev/null @@ -1,113 +0,0 @@ -from __future__ import absolute_import - -import sys - -from Queue import Queue - -from mock import Mock, patch - -from celery.worker.mediator import Mediator -from celery.worker.state import revoked as revoked_tasks -from celery.tests.utils import Case - - -class MockTask(object): - hostname = 'harness.com' - id = 1234 - name = 'mocktask' - - def __init__(self, value, **kwargs): - self.value = value - - on_ack = Mock() - - def revoked(self): - if self.id in revoked_tasks: - self.on_ack() - 
return True - return False - - -class test_Mediator(Case): - - def test_mediator_start__stop(self): - ready_queue = Queue() - m = Mediator(ready_queue, lambda t: t) - m.start() - self.assertFalse(m._is_shutdown.isSet()) - self.assertFalse(m._is_stopped.isSet()) - m.stop() - m.join() - self.assertTrue(m._is_shutdown.isSet()) - self.assertTrue(m._is_stopped.isSet()) - - def test_mediator_body(self): - ready_queue = Queue() - got = {} - - def mycallback(value): - got['value'] = value.value - - m = Mediator(ready_queue, mycallback) - ready_queue.put(MockTask('George Costanza')) - - m.body() - - self.assertEqual(got['value'], 'George Costanza') - - ready_queue.put(MockTask('Jerry Seinfeld')) - m._does_debug = False - m.body() - self.assertEqual(got['value'], 'Jerry Seinfeld') - - @patch('os._exit') - def test_mediator_crash(self, _exit): - ms = [None] - - class _Mediator(Mediator): - - def body(self): - try: - raise KeyError('foo') - finally: - ms[0]._is_shutdown.set() - - ready_queue = Queue() - ms[0] = m = _Mediator(ready_queue, None) - ready_queue.put(MockTask('George Constanza')) - - stderr = Mock() - p, sys.stderr = sys.stderr, stderr - try: - m.run() - finally: - sys.stderr = p - self.assertTrue(_exit.call_count) - self.assertTrue(stderr.write.call_count) - - def test_mediator_body_exception(self): - ready_queue = Queue() - - def mycallback(value): - raise KeyError('foo') - - m = Mediator(ready_queue, mycallback) - ready_queue.put(MockTask('Elaine M. Benes')) - - m.body() - - def test_run(self): - ready_queue = Queue() - - condition = [None] - - def mycallback(value): - condition[0].set() - - m = Mediator(ready_queue, mycallback) - condition[0] = m._is_shutdown - ready_queue.put(MockTask('Elaine M. Benes')) - - m.run() - self.assertTrue(m._is_shutdown.isSet()) - self.assertTrue(m._is_stopped.isSet()) diff --git a/awx/lib/site-packages/celery/tests/worker/test_request.py b/awx/lib/site-packages/celery/tests/worker/test_request.py index b83768bf3d..9b46a27ea7 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_request.py +++ b/awx/lib/site-packages/celery/tests/worker/test_request.py @@ -1,32 +1,20 @@ # -*- coding: utf-8 -*- -from __future__ import absolute_import -from __future__ import with_statement +from __future__ import absolute_import, unicode_literals import anyjson import os import signal +import socket import sys -import time from datetime import datetime, timedelta +from billiard.einfo import ExceptionInfo from kombu.transport.base import Message from kombu.utils.encoding import from_utf8, default_encode -from mock import Mock, patch -from nose import SkipTest -from celery import current_app from celery import states -from celery.app import app_or_default -from celery.concurrency.base import BasePool -from celery.datastructures import ExceptionInfo -from celery.exceptions import ( - RetryTaskError, - WorkerLostError, - InvalidTaskError, - TaskRevokedError, -) -from celery.task.trace import ( +from celery.app.trace import ( trace_task, _trace_task_ret, TraceInfo, @@ -35,19 +23,31 @@ from celery.task.trace import ( setup_worker_optimizations, reset_worker_optimizations, ) -from celery.result import AsyncResult +from celery.concurrency.base import BasePool +from celery.exceptions import ( + Ignore, + InvalidTaskError, + Retry, + TaskRevokedError, + Terminated, + WorkerLostError, +) +from celery.five import keys, monotonic from celery.signals import task_revoked -from celery.task import task as task_dec -from celery.task.base import Task from celery.utils import uuid from 
celery.worker import job as module -from celery.worker.job import Request, TaskRequest +from celery.worker.job import Request, logger as req_logger from celery.worker.state import revoked -from celery.tests.utils import AppCase, Case, assert_signal_called - -scratch = {'ACK': False} -some_kwargs_scratchpad = {} +from celery.tests.case import ( + AppCase, + Case, + Mock, + SkipTest, + assert_signal_called, + body_from_sig, + patch, +) class test_mro_lookup(Case): @@ -79,152 +79,121 @@ class test_mro_lookup(Case): self.assertIsNone(mro_lookup(D, 'x')) -def jail(task_id, name, args, kwargs): +def jail(app, task_id, name, args, kwargs): request = {'id': task_id} - task = current_app.tasks[name] + task = app.tasks[name] task.__trace__ = None # rebuild return trace_task( - task, task_id, args, kwargs, request=request, eager=False, + task, task_id, args, kwargs, request=request, eager=False, app=app, ) -def on_ack(*args, **kwargs): - scratch['ACK'] = True +class test_default_encode(AppCase): - -@task_dec(accept_magic_kwargs=False) -def mytask(i, **kwargs): - return i ** i - - -@task_dec # traverses coverage for decorator without parens -def mytask_no_kwargs(i): - return i ** i - - -class MyTaskIgnoreResult(Task): - ignore_result = True - - def run(self, i): - return i ** i - - -@task_dec(accept_magic_kwargs=True) -def mytask_some_kwargs(i, task_id): - some_kwargs_scratchpad['task_id'] = task_id - return i ** i - - -@task_dec(accept_magic_kwargs=False) -def mytask_raising(i): - raise KeyError(i) - - -class test_default_encode(Case): - - def setUp(self): + def setup(self): if sys.version_info >= (3, 0): raise SkipTest('py3k: not relevant') def test_jython(self): prev, sys.platform = sys.platform, 'java 1.6.1' try: - self.assertEqual(default_encode('foo'), 'foo') + self.assertEqual(default_encode(bytes('foo')), 'foo') finally: sys.platform = prev - def test_cython(self): + def test_cpython(self): prev, sys.platform = sys.platform, 'darwin' gfe, sys.getfilesystemencoding = ( sys.getfilesystemencoding, lambda: 'utf-8', ) try: - self.assertEqual(default_encode('foo'), 'foo') + self.assertEqual(default_encode(bytes('foo')), 'foo') finally: sys.platform = prev sys.getfilesystemencoding = gfe -class test_RetryTaskError(Case): +class test_Retry(AppCase): - def test_retry_task_error(self): + def test_retry_semipredicate(self): try: raise Exception('foo') - except Exception, exc: - ret = RetryTaskError('Retrying task', exc) + except Exception as exc: + ret = Retry('Retrying task', exc) self.assertEqual(ret.exc, exc) -class test_trace_task(Case): +class test_trace_task(AppCase): - @patch('celery.task.trace._logger') + def setup(self): + + @self.app.task(shared=False) + def mytask(i, **kwargs): + return i ** i + self.mytask = mytask + + @self.app.task(shared=False) + def mytask_raising(i): + raise KeyError(i) + self.mytask_raising = mytask_raising + + @patch('celery.app.trace._logger') def test_process_cleanup_fails(self, _logger): - backend = mytask.backend - mytask.backend = Mock() - mytask.backend.process_cleanup = Mock(side_effect=KeyError()) - try: - tid = uuid() - ret = jail(tid, mytask.name, [2], {}) - self.assertEqual(ret, 4) - mytask.backend.store_result.assert_called_with(tid, 4, - states.SUCCESS) - self.assertIn('Process cleanup failed', - _logger.error.call_args[0][0]) - finally: - mytask.backend = backend + self.mytask.backend = Mock() + self.mytask.backend.process_cleanup = Mock(side_effect=KeyError()) + tid = uuid() + ret = jail(self.app, tid, self.mytask.name, [2], {}) + self.assertEqual(ret, 
4) + self.assertTrue(self.mytask.backend.store_result.called) + self.assertIn('Process cleanup failed', _logger.error.call_args[0][0]) def test_process_cleanup_BaseException(self): - backend = mytask.backend - mytask.backend = Mock() - mytask.backend.process_cleanup = Mock(side_effect=SystemExit()) - try: - with self.assertRaises(SystemExit): - jail(uuid(), mytask.name, [2], {}) - finally: - mytask.backend = backend + self.mytask.backend = Mock() + self.mytask.backend.process_cleanup = Mock(side_effect=SystemExit()) + with self.assertRaises(SystemExit): + jail(self.app, uuid(), self.mytask.name, [2], {}) def test_execute_jail_success(self): - ret = jail(uuid(), mytask.name, [2], {}) + ret = jail(self.app, uuid(), self.mytask.name, [2], {}) self.assertEqual(ret, 4) def test_marked_as_started(self): + _started = [] - class Backend(mytask.backend.__class__): - _started = [] + def store_result(tid, meta, state, **kwargs): + if state == states.STARTED: + _started.append(tid) + self.mytask.backend.store_result = Mock(name='store_result') + self.mytask.backend.store_result.side_effect = store_result + self.mytask.track_started = True - def store_result(self, tid, meta, state): - if state == states.STARTED: - self._started.append(tid) + tid = uuid() + jail(self.app, tid, self.mytask.name, [2], {}) + self.assertIn(tid, _started) - prev, mytask.backend = mytask.backend, Backend() - mytask.track_started = True - - try: - tid = uuid() - jail(tid, mytask.name, [2], {}) - self.assertIn(tid, Backend._started) - - mytask.ignore_result = True - tid = uuid() - jail(tid, mytask.name, [2], {}) - self.assertNotIn(tid, Backend._started) - finally: - mytask.backend = prev - mytask.track_started = False - mytask.ignore_result = False + self.mytask.ignore_result = True + tid = uuid() + jail(self.app, tid, self.mytask.name, [2], {}) + self.assertNotIn(tid, _started) def test_execute_jail_failure(self): - ret = jail(uuid(), mytask_raising.name, - [4], {}) + ret = jail( + self.app, uuid(), self.mytask_raising.name, [4], {}, + ) self.assertIsInstance(ret, ExceptionInfo) self.assertTupleEqual(ret.exception.args, (4, )) def test_execute_ignore_result(self): + + @self.app.task(shared=False, ignore_result=True) + def ignores_result(i): + return i ** i + task_id = uuid() - ret = jail(task_id, MyTaskIgnoreResult.name, [4], {}) + ret = jail(self.app, task_id, ignores_result.name, [4], {}) self.assertEqual(ret, 256) - self.assertFalse(AsyncResult(task_id).ready()) + self.assertFalse(self.app.AsyncResult(task_id).ready()) class MockEventDispatcher(object): @@ -237,127 +206,258 @@ class MockEventDispatcher(object): self.sent.append(event) -class test_TaskRequest(AppCase): +class test_Request(AppCase): + + def setup(self): + + @self.app.task(shared=False) + def add(x, y, **kw_): + return x + y + self.add = add + + @self.app.task(shared=False) + def mytask(i, **kwargs): + return i ** i + self.mytask = mytask + + @self.app.task(shared=False) + def mytask_raising(i): + raise KeyError(i) + self.mytask_raising = mytask_raising + + def get_request(self, sig, Request=Request, **kwargs): + return Request( + body_from_sig(self.app, sig), + on_ack=Mock(), + eventer=Mock(), + app=self.app, + connection_errors=(socket.error, ), + task=sig.type, + **kwargs + ) + + def test_invalid_eta_raises_InvalidTaskError(self): + with self.assertRaises(InvalidTaskError): + self.get_request(self.add.s(2, 2).set(eta='12345')) + + def test_invalid_expires_raises_InvalidTaskError(self): + with self.assertRaises(InvalidTaskError): +
self.get_request(self.add.s(2, 2).set(expires='12345')) + + def test_valid_expires_with_utc_makes_aware(self): + with patch('celery.worker.job.maybe_make_aware') as mma: + self.get_request(self.add.s(2, 2).set(expires=10)) + self.assertTrue(mma.called) + + def test_maybe_expire_when_expires_is_None(self): + req = self.get_request(self.add.s(2, 2)) + self.assertFalse(req.maybe_expire()) + + def test_on_retry_acks_if_late(self): + self.add.acks_late = True + req = self.get_request(self.add.s(2, 2)) + req.on_retry(Mock()) + req.on_ack.assert_called_with(req_logger, req.connection_errors) + + def test_on_failure_Terminated(self): + einfo = None + try: + raise Terminated('9') + except Terminated: + einfo = ExceptionInfo() + self.assertIsNotNone(einfo) + req = self.get_request(self.add.s(2, 2)) + req.on_failure(einfo) + req.eventer.send.assert_called_with( + 'task-revoked', + uuid=req.id, terminated=True, signum='9', expired=False, + ) + + def test_log_error_propagates_MemoryError(self): + einfo = None + try: + raise MemoryError() + except MemoryError: + einfo = ExceptionInfo(internal=True) + self.assertIsNotNone(einfo) + req = self.get_request(self.add.s(2, 2)) + with self.assertRaises(MemoryError): + req._log_error(einfo) + + def test_log_error_when_Ignore(self): + einfo = None + try: + raise Ignore() + except Ignore: + einfo = ExceptionInfo(internal=True) + self.assertIsNotNone(einfo) + req = self.get_request(self.add.s(2, 2)) + req._log_error(einfo) + req.on_ack.assert_called_with(req_logger, req.connection_errors) + + def test_tzlocal_is_cached(self): + req = self.get_request(self.add.s(2, 2)) + req._tzlocal = 'foo' + self.assertEqual(req.tzlocal, 'foo') + + def test_execute_magic_kwargs(self): + task = self.add.s(2, 2) + task.freeze() + req = self.get_request(task) + self.add.accept_magic_kwargs = True + pool = Mock() + req.execute_using_pool(pool) + self.assertTrue(pool.apply_async.called) + args = pool.apply_async.call_args[1]['args'] + self.assertEqual(args[0], task.task) + self.assertEqual(args[1], task.id) + self.assertEqual(args[2], task.args) + kwargs = args[3] + self.assertEqual(kwargs.get('task_name'), task.task) + + def xRequest(self, body=None, **kwargs): + body = dict({'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwargs': {'f': 'x'}}, **body or {}) + return Request(body, app=self.app, **kwargs) def test_task_wrapper_repr(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - self.assertTrue(repr(tw)) + self.assertTrue(repr(self.xRequest())) @patch('celery.worker.job.kwdict') def test_kwdict(self, kwdict): - prev, module.NEEDS_KWDICT = module.NEEDS_KWDICT, True try: - TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) + self.xRequest() self.assertTrue(kwdict.called) finally: module.NEEDS_KWDICT = prev def test_sets_store_errors(self): - mytask.ignore_result = True - try: - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - self.assertFalse(tw.store_errors) - mytask.store_errors_even_if_ignored = True - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - self.assertTrue(tw.store_errors) - finally: - mytask.ignore_result = False - mytask.store_errors_even_if_ignored = False + self.mytask.ignore_result = True + job = self.xRequest() + self.assertFalse(job.store_errors) + + self.mytask.store_errors_even_if_ignored = True + job = self.xRequest() + self.assertTrue(job.store_errors) def test_send_event(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.eventer = MockEventDispatcher() - tw.send_event('task-frobulated') -
self.assertIn('task-frobulated', tw.eventer.sent) + job = self.xRequest() + job.eventer = MockEventDispatcher() + job.send_event('task-frobulated') + self.assertIn('task-frobulated', job.eventer.sent) def test_on_retry(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.eventer = MockEventDispatcher() + job = Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwargs': {'f': 'x'}, + }, app=self.app) + job.eventer = MockEventDispatcher() try: - raise RetryTaskError('foo', KeyError('moofoobar')) + raise Retry('foo', KeyError('moofoobar')) except: einfo = ExceptionInfo() - tw.on_failure(einfo) - self.assertIn('task-retried', tw.eventer.sent) + job.on_failure(einfo) + self.assertIn('task-retried', job.eventer.sent) prev, module._does_info = module._does_info, False try: - tw.on_failure(einfo) + job.on_failure(einfo) finally: module._does_info = prev einfo.internal = True - tw.on_failure(einfo) + job.on_failure(einfo) def test_compat_properties(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - self.assertEqual(tw.task_id, tw.id) - self.assertEqual(tw.task_name, tw.name) - tw.task_id = 'ID' - self.assertEqual(tw.id, 'ID') - tw.task_name = 'NAME' - self.assertEqual(tw.name, 'NAME') + job = Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwargs': {'f': 'x'}, + }, app=self.app) + self.assertEqual(job.task_id, job.id) + self.assertEqual(job.task_name, job.name) + job.task_id = 'ID' + self.assertEqual(job.id, 'ID') + job.task_name = 'NAME' + self.assertEqual(job.name, 'NAME') def test_terminate__task_started(self): pool = Mock() - signum = signal.SIGKILL - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - with assert_signal_called(task_revoked, sender=tw.task, - terminated=True, - expired=False, - signum=signum): - tw.time_start = time.time() - tw.worker_pid = 313 - tw.terminate(pool, signal='KILL') - pool.terminate_job.assert_called_with(tw.worker_pid, signum) + signum = signal.SIGTERM + job = Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwargs': {'f': 'x'}, + }, app=self.app) + with assert_signal_called( + task_revoked, sender=job.task, request=job, + terminated=True, expired=False, signum=signum): + job.time_start = monotonic() + job.worker_pid = 313 + job.terminate(pool, signal='TERM') + pool.terminate_job.assert_called_with(job.worker_pid, signum) def test_terminate__task_reserved(self): pool = Mock() - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.time_start = None - tw.terminate(pool, signal='KILL') + job = Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwargs': {'f': 'x'}, + }, app=self.app) + job.time_start = None + job.terminate(pool, signal='TERM') self.assertFalse(pool.terminate_job.called) - self.assertTupleEqual(tw._terminate_on_ack, (pool, 'KILL')) - tw.terminate(pool, signal='KILL') + self.assertTupleEqual(job._terminate_on_ack, (pool, 15)) + job.terminate(pool, signal='TERM') def test_revoked_expires_expired(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}, - expires=datetime.utcnow() - timedelta(days=1)) - with assert_signal_called(task_revoked, sender=tw.task, - terminated=False, - expired=True, - signum=None): - tw.revoked() - self.assertIn(tw.id, revoked) - self.assertEqual(mytask.backend.get_status(tw.id), - states.REVOKED) + job = Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwargs': {'f': 'x'}, + 'expires': datetime.utcnow() - timedelta(days=1), + }, app=self.app) + with
assert_signal_called( + task_revoked, sender=job.task, request=job, + terminated=False, expired=True, signum=None): + job.revoked() + self.assertIn(job.id, revoked) + self.assertEqual( + self.mytask.backend.get_status(job.id), + states.REVOKED, + ) def test_revoked_expires_not_expired(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}, - expires=datetime.utcnow() + timedelta(days=1)) - tw.revoked() - self.assertNotIn(tw.id, revoked) + job = self.xRequest({ + 'expires': datetime.utcnow() + timedelta(days=1), + }) + job.revoked() + self.assertNotIn(job.id, revoked) self.assertNotEqual( - mytask.backend.get_status(tw.id), + self.mytask.backend.get_status(job.id), states.REVOKED, ) def test_revoked_expires_ignore_result(self): - mytask.ignore_result = True - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}, - expires=datetime.utcnow() - timedelta(days=1)) - try: - tw.revoked() - self.assertIn(tw.id, revoked) - self.assertNotEqual(mytask.backend.get_status(tw.id), - states.REVOKED) - - finally: - mytask.ignore_result = False + self.mytask.ignore_result = True + job = self.xRequest({ + 'expires': datetime.utcnow() - timedelta(days=1), + }) + job.revoked() + self.assertIn(job.id, revoked) + self.assertNotEqual( + self.mytask.backend.get_status(job.id), states.REVOKED, + ) def test_send_email(self): - app = app_or_default() - old_mail_admins = app.mail_admins - old_enable_mails = mytask.send_error_emails + app = self.app mail_sent = [False] def mock_mail_admins(*args, **kwargs): @@ -370,158 +470,134 @@ class test_TaskRequest(AppCase): return ExceptionInfo() app.mail_admins = mock_mail_admins - mytask.send_error_emails = True - try: - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) + self.mytask.send_error_emails = True + job = self.xRequest() + einfo = get_ei() + job.on_failure(einfo) + self.assertTrue(mail_sent[0]) - einfo = get_ei() - tw.on_failure(einfo) - self.assertTrue(mail_sent[0]) + einfo = get_ei() + mail_sent[0] = False + self.mytask.send_error_emails = False + job.on_failure(einfo) + self.assertFalse(mail_sent[0]) - einfo = get_ei() - mail_sent[0] = False - mytask.send_error_emails = False - tw.on_failure(einfo) - self.assertFalse(mail_sent[0]) - - einfo = get_ei() - mail_sent[0] = False - mytask.send_error_emails = True - mytask.error_whitelist = [KeyError] - tw.on_failure(einfo) - self.assertTrue(mail_sent[0]) - - einfo = get_ei() - mail_sent[0] = False - mytask.send_error_emails = True - mytask.error_whitelist = [SyntaxError] - tw.on_failure(einfo) - self.assertFalse(mail_sent[0]) - - finally: - app.mail_admins = old_mail_admins - mytask.send_error_emails = old_enable_mails - mytask.error_whitelist = () + einfo = get_ei() + mail_sent[0] = False + self.mytask.send_error_emails = True + job.on_failure(einfo) + self.assertTrue(mail_sent[0]) def test_already_revoked(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw._already_revoked = True - self.assertTrue(tw.revoked()) + job = self.xRequest() + job._already_revoked = True + self.assertTrue(job.revoked()) def test_revoked(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - with assert_signal_called(task_revoked, sender=tw.task, - terminated=False, - expired=False, - signum=None): - revoked.add(tw.id) - self.assertTrue(tw.revoked()) - self.assertTrue(tw._already_revoked) - self.assertTrue(tw.acknowledged) + job = self.xRequest() + with assert_signal_called( + task_revoked, sender=job.task, request=job, + terminated=False, expired=False, signum=None): + revoked.add(job.id) + 
self.assertTrue(job.revoked()) + self.assertTrue(job._already_revoked) + self.assertTrue(job.acknowledged) def test_execute_does_not_execute_revoked(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - revoked.add(tw.id) - tw.execute() + job = self.xRequest() + revoked.add(job.id) + job.execute() def test_execute_acks_late(self): - mytask_raising.acks_late = True - tw = TaskRequest(mytask_raising.name, uuid(), [1]) - try: - tw.execute() - self.assertTrue(tw.acknowledged) - tw.task.accept_magic_kwargs = False - tw.execute() - finally: - mytask_raising.acks_late = False + self.mytask_raising.acks_late = True + job = self.xRequest({ + 'task': self.mytask_raising.name, + 'kwargs': {}, + }) + job.execute() + self.assertTrue(job.acknowledged) + job.execute() def test_execute_using_pool_does_not_execute_revoked(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - revoked.add(tw.id) + job = self.xRequest() + revoked.add(job.id) with self.assertRaises(TaskRevokedError): - tw.execute_using_pool(None) + job.execute_using_pool(None) def test_on_accepted_acks_early(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.on_accepted(pid=os.getpid(), time_accepted=time.time()) - self.assertTrue(tw.acknowledged) + job = self.xRequest() + job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) + self.assertTrue(job.acknowledged) prev, module._does_debug = module._does_debug, False try: - tw.on_accepted(pid=os.getpid(), time_accepted=time.time()) + job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) finally: module._does_debug = prev def test_on_accepted_acks_late(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - mytask.acks_late = True - try: - tw.on_accepted(pid=os.getpid(), time_accepted=time.time()) - self.assertFalse(tw.acknowledged) - finally: - mytask.acks_late = False + job = self.xRequest() + self.mytask.acks_late = True + job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) + self.assertFalse(job.acknowledged) def test_on_accepted_terminates(self): - signum = signal.SIGKILL + signum = signal.SIGTERM pool = Mock() - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - with assert_signal_called(task_revoked, sender=tw.task, - terminated=True, - expired=False, - signum=signum): - tw.terminate(pool, signal='KILL') + job = self.xRequest() + with assert_signal_called( + task_revoked, sender=job.task, request=job, + terminated=True, expired=False, signum=signum): + job.terminate(pool, signal='TERM') self.assertFalse(pool.terminate_job.call_count) - tw.on_accepted(pid=314, time_accepted=time.time()) + job.on_accepted(pid=314, time_accepted=monotonic()) pool.terminate_job.assert_called_with(314, signum) def test_on_success_acks_early(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.time_start = 1 - tw.on_success(42) + job = self.xRequest() + job.time_start = 1 + job.on_success(42) prev, module._does_info = module._does_info, False try: - tw.on_success(42) - self.assertFalse(tw.acknowledged) + job.on_success(42) + self.assertFalse(job.acknowledged) finally: module._does_info = prev def test_on_success_BaseException(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.time_start = 1 + job = self.xRequest() + job.time_start = 1 with self.assertRaises(SystemExit): try: raise SystemExit() except SystemExit: - tw.on_success(ExceptionInfo()) + job.on_success(ExceptionInfo()) else: assert False def test_on_success_eventer(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.time_start = 
1 - tw.eventer = Mock() - tw.send_event = Mock() - tw.on_success(42) - self.assertTrue(tw.send_event.called) + job = self.xRequest() + job.time_start = 1 + job.eventer = Mock() + job.eventer.send = Mock() + job.on_success(42) + self.assertTrue(job.eventer.send.called) def test_on_success_when_failure(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.time_start = 1 - tw.on_failure = Mock() + job = self.xRequest() + job.time_start = 1 + job.on_failure = Mock() try: raise KeyError('foo') except Exception: - tw.on_success(ExceptionInfo()) - self.assertTrue(tw.on_failure.called) + job.on_success(ExceptionInfo()) + self.assertTrue(job.on_failure.called) def test_on_success_acks_late(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.time_start = 1 - mytask.acks_late = True - try: - tw.on_success(42) - self.assertTrue(tw.acknowledged) - finally: - mytask.acks_late = False + job = self.xRequest() + job.time_start = 1 + self.mytask.acks_late = True + job.on_success(42) + self.assertTrue(job.acknowledged) def test_on_failure_WorkerLostError(self): @@ -531,187 +607,191 @@ class test_TaskRequest(AppCase): except WorkerLostError: return ExceptionInfo() - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) + job = self.xRequest() exc_info = get_ei() - tw.on_failure(exc_info) - self.assertEqual(mytask.backend.get_status(tw.id), - states.FAILURE) + job.on_failure(exc_info) + self.assertEqual( + self.mytask.backend.get_status(job.id), states.FAILURE, + ) - mytask.ignore_result = True - try: - exc_info = get_ei() - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.on_failure(exc_info) - self.assertEqual(mytask.backend.get_status(tw.id), - states.PENDING) - finally: - mytask.ignore_result = False + self.mytask.ignore_result = True + exc_info = get_ei() + job = self.xRequest() + job.on_failure(exc_info) + self.assertEqual( + self.mytask.backend.get_status(job.id), states.PENDING, + ) def test_on_failure_acks_late(self): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.time_start = 1 - mytask.acks_late = True + job = self.xRequest() + job.time_start = 1 + self.mytask.acks_late = True try: - try: - raise KeyError('foo') - except KeyError: - exc_info = ExceptionInfo() - tw.on_failure(exc_info) - self.assertTrue(tw.acknowledged) - finally: - mytask.acks_late = False + raise KeyError('foo') + except KeyError: + exc_info = ExceptionInfo() + job.on_failure(exc_info) + self.assertTrue(job.acknowledged) def test_from_message_invalid_kwargs(self): - body = dict(task=mytask.name, id=1, args=(), kwargs='foo') + body = dict(task=self.mytask.name, id=1, args=(), kwargs='foo') with self.assertRaises(InvalidTaskError): - TaskRequest.from_message(None, body) + Request(body, message=None, app=self.app) @patch('celery.worker.job.error') @patch('celery.worker.job.warn') def test_on_timeout(self, warn, error): - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.on_timeout(soft=True, timeout=1337) + job = self.xRequest() + job.on_timeout(soft=True, timeout=1337) self.assertIn('Soft time limit', warn.call_args[0][0]) - tw.on_timeout(soft=False, timeout=1337) + job.on_timeout(soft=False, timeout=1337) self.assertIn('Hard time limit', error.call_args[0][0]) - self.assertEqual(mytask.backend.get_status(tw.id), - states.FAILURE) + self.assertEqual( + self.mytask.backend.get_status(job.id), states.FAILURE, + ) - mytask.ignore_result = True - try: - tw = TaskRequest(mytask.name, uuid(), [1], {'f': 'x'}) - tw.on_timeout(soft=True, timeout=1336) - 
self.assertEqual(mytask.backend.get_status(tw.id), - states.PENDING) - finally: - mytask.ignore_result = False + self.mytask.ignore_result = True + job = self.xRequest() + job.on_timeout(soft=True, timeout=1336) + self.assertEqual( + self.mytask.backend.get_status(job.id), states.PENDING, + ) def test_fast_trace_task(self): - from celery.task import trace + from celery.app import trace setup_worker_optimizations(self.app) self.assertIs(trace.trace_task_ret, trace._fast_trace_task) try: - mytask.__trace__ = build_tracer(mytask.name, mytask, - self.app.loader, 'test') - res = trace.trace_task_ret(mytask.name, uuid(), [4], {}) + self.mytask.__trace__ = build_tracer( + self.mytask.name, self.mytask, self.app.loader, 'test', + app=self.app, + ) + res = trace.trace_task_ret(self.mytask.name, uuid(), [4], {}) self.assertEqual(res, 4 ** 4) finally: reset_worker_optimizations() self.assertIs(trace.trace_task_ret, trace._trace_task_ret) - delattr(mytask, '__trace__') - res = trace.trace_task_ret(mytask.name, uuid(), [4], {}) + delattr(self.mytask, '__trace__') + res = trace.trace_task_ret( + self.mytask.name, uuid(), [4], {}, app=self.app, + ) self.assertEqual(res, 4 ** 4) def test_trace_task_ret(self): - mytask.__trace__ = build_tracer(mytask.name, mytask, - self.app.loader, 'test') - res = _trace_task_ret(mytask.name, uuid(), [4], {}) + self.mytask.__trace__ = build_tracer( + self.mytask.name, self.mytask, self.app.loader, 'test', + app=self.app, + ) + res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app) self.assertEqual(res, 4 ** 4) def test_trace_task_ret__no_trace(self): try: - delattr(mytask, '__trace__') + delattr(self.mytask, '__trace__') except AttributeError: pass - res = _trace_task_ret(mytask.name, uuid(), [4], {}) + res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app) self.assertEqual(res, 4 ** 4) - def test_execute_safe_catches_exception(self): + def test_trace_catches_exception(self): def _error_exec(self, *args, **kwargs): raise KeyError('baz') - @task_dec(request=None) + @self.app.task(request=None, shared=False) def raising(): raise KeyError('baz') - with self.assertWarnsRegex( - RuntimeWarning, r'Exception raised outside'): - res = trace_task(raising, uuid(), [], {}) + with self.assertWarnsRegex(RuntimeWarning, + r'Exception raised outside'): + res = trace_task(raising, uuid(), [], {}, app=self.app) self.assertIsInstance(res, ExceptionInfo) def test_worker_task_trace_handle_retry(self): - from celery.exceptions import RetryTaskError tid = uuid() - mytask.push_request(id=tid) + self.mytask.push_request(id=tid) try: raise ValueError('foo') - except Exception, exc: + except Exception as exc: try: - raise RetryTaskError(str(exc), exc=exc) - except RetryTaskError, exc: + raise Retry(str(exc), exc=exc) + except Retry as exc: w = TraceInfo(states.RETRY, exc) - w.handle_retry(mytask, store_errors=False) - self.assertEqual(mytask.backend.get_status(tid), - states.PENDING) - w.handle_retry(mytask, store_errors=True) - self.assertEqual(mytask.backend.get_status(tid), - states.RETRY) + w.handle_retry(self.mytask, store_errors=False) + self.assertEqual( + self.mytask.backend.get_status(tid), states.PENDING, + ) + w.handle_retry(self.mytask, store_errors=True) + self.assertEqual( + self.mytask.backend.get_status(tid), states.RETRY, + ) finally: - mytask.pop_request() + self.mytask.pop_request() def test_worker_task_trace_handle_failure(self): tid = uuid() - mytask.push_request() + self.mytask.push_request() try: - mytask.request.id = tid + 
self.mytask.request.id = tid try: raise ValueError('foo') - except Exception, exc: + except Exception as exc: w = TraceInfo(states.FAILURE, exc) - w.handle_failure(mytask, store_errors=False) - self.assertEqual(mytask.backend.get_status(tid), - states.PENDING) - w.handle_failure(mytask, store_errors=True) - self.assertEqual(mytask.backend.get_status(tid), - states.FAILURE) + w.handle_failure(self.mytask, store_errors=False) + self.assertEqual( + self.mytask.backend.get_status(tid), states.PENDING, + ) + w.handle_failure(self.mytask, store_errors=True) + self.assertEqual( + self.mytask.backend.get_status(tid), states.FAILURE, + ) finally: - mytask.pop_request() + self.mytask.pop_request() def test_task_wrapper_mail_attrs(self): - tw = TaskRequest(mytask.name, uuid(), [], {}) - x = tw.success_msg % { - 'name': tw.name, - 'id': tw.id, + job = self.xRequest({'args': [], 'kwargs': {}}) + x = job.success_msg % { + 'name': job.name, + 'id': job.id, 'return_value': 10, 'runtime': 0.3641, } self.assertTrue(x) - x = tw.error_msg % { - 'name': tw.name, - 'id': tw.id, + x = job.error_msg % { + 'name': job.name, + 'id': job.id, 'exc': 'FOOBARBAZ', 'traceback': 'foobarbaz', } self.assertTrue(x) def test_from_message(self): - us = u'æØåveéðƒeæ' - body = {'task': mytask.name, 'id': uuid(), + us = 'æØåveéðƒeæ' + body = {'task': self.mytask.name, 'id': uuid(), 'args': [2], 'kwargs': {us: 'bar'}} m = Message(None, body=anyjson.dumps(body), backend='foo', content_type='application/json', content_encoding='utf-8') - tw = TaskRequest.from_message(m, m.decode()) - self.assertIsInstance(tw, Request) - self.assertEqual(tw.name, body['task']) - self.assertEqual(tw.id, body['id']) - self.assertEqual(tw.args, body['args']) + job = Request(m.decode(), message=m, app=self.app) + self.assertIsInstance(job, Request) + self.assertEqual(job.name, body['task']) + self.assertEqual(job.id, body['id']) + self.assertEqual(job.args, body['args']) us = from_utf8(us) if sys.version_info < (2, 6): - self.assertEqual(tw.kwargs.keys()[0], us) - self.assertIsInstance(tw.kwargs.keys()[0], str) + self.assertEqual(next(keys(job.kwargs)), us) + self.assertIsInstance(next(keys(job.kwargs)), str) def test_from_message_empty_args(self): - body = {'task': mytask.name, 'id': uuid()} + body = {'task': self.mytask.name, 'id': uuid()} m = Message(None, body=anyjson.dumps(body), backend='foo', content_type='application/json', content_encoding='utf-8') - tw = TaskRequest.from_message(m, m.decode()) - self.assertIsInstance(tw, Request) - self.assertEquals(tw.args, []) - self.assertEquals(tw.kwargs, {}) + job = Request(m.decode(), message=m, app=self.app) + self.assertIsInstance(job, Request) + self.assertEqual(job.args, []) + self.assertEqual(job.kwargs, {}) def test_from_message_missing_required_fields(self): body = {} @@ -719,63 +799,94 @@ class test_TaskRequest(AppCase): content_type='application/json', content_encoding='utf-8') with self.assertRaises(KeyError): - TaskRequest.from_message(m, m.decode()) + Request(m.decode(), message=m, app=self.app) def test_from_message_nonexistant_task(self): body = {'task': 'cu.mytask.doesnotexist', 'id': uuid(), - 'args': [2], 'kwargs': {u'æØåveéðƒeæ': 'bar'}} + 'args': [2], 'kwargs': {'æØåveéðƒeæ': 'bar'}} m = Message(None, body=anyjson.dumps(body), backend='foo', content_type='application/json', content_encoding='utf-8') with self.assertRaises(KeyError): - TaskRequest.from_message(m, m.decode()) + Request(m.decode(), message=m, app=self.app) def test_execute(self): tid = uuid() - tw = 
TaskRequest(mytask.name, tid, [4], {'f': 'x'}) - self.assertEqual(tw.execute(), 256) - meta = mytask.backend.get_task_meta(tid) - self.assertEqual(meta['result'], 256) + job = self.xRequest({'id': tid, 'args': [4], 'kwargs': {}}) + self.assertEqual(job.execute(), 256) + meta = self.mytask.backend.get_task_meta(tid) self.assertEqual(meta['status'], states.SUCCESS) + self.assertEqual(meta['result'], 256) def test_execute_success_no_kwargs(self): + + @self.app.task # traverses coverage for decorator without parens + def mytask_no_kwargs(i): + return i ** i + tid = uuid() - tw = TaskRequest(mytask_no_kwargs.name, tid, [4], {}) - self.assertEqual(tw.execute(), 256) + job = self.xRequest({ + 'task': mytask_no_kwargs.name, + 'id': tid, + 'args': [4], + 'kwargs': {}, + }) + self.assertEqual(job.execute(), 256) meta = mytask_no_kwargs.backend.get_task_meta(tid) self.assertEqual(meta['result'], 256) self.assertEqual(meta['status'], states.SUCCESS) def test_execute_success_some_kwargs(self): + scratch = {'task_id': None} + + @self.app.task(shared=False, accept_magic_kwargs=True) + def mytask_some_kwargs(i, task_id): + scratch['task_id'] = task_id + return i ** i + tid = uuid() - tw = TaskRequest(mytask_some_kwargs.name, tid, [4], {}) - self.assertEqual(tw.execute(), 256) + job = self.xRequest({ + 'task': mytask_some_kwargs.name, + 'id': tid, + 'args': [4], + 'kwargs': {}, + }) + self.assertEqual(job.execute(), 256) meta = mytask_some_kwargs.backend.get_task_meta(tid) - self.assertEqual(some_kwargs_scratchpad.get('task_id'), tid) + self.assertEqual(scratch.get('task_id'), tid) self.assertEqual(meta['result'], 256) self.assertEqual(meta['status'], states.SUCCESS) def test_execute_ack(self): + scratch = {'ACK': False} + + def on_ack(*args, **kwargs): + scratch['ACK'] = True + tid = uuid() - tw = TaskRequest(mytask.name, tid, [4], {'f': 'x'}, - on_ack=on_ack) - self.assertEqual(tw.execute(), 256) - meta = mytask.backend.get_task_meta(tid) + job = self.xRequest({'id': tid, 'args': [4]}, on_ack=on_ack) + self.assertEqual(job.execute(), 256) + meta = self.mytask.backend.get_task_meta(tid) self.assertTrue(scratch['ACK']) self.assertEqual(meta['result'], 256) self.assertEqual(meta['status'], states.SUCCESS) def test_execute_fail(self): tid = uuid() - tw = TaskRequest(mytask_raising.name, tid, [4]) - self.assertIsInstance(tw.execute(), ExceptionInfo) - meta = mytask_raising.backend.get_task_meta(tid) + job = self.xRequest({ + 'task': self.mytask_raising.name, + 'id': tid, + 'args': [4], + 'kwargs': {}, + }) + self.assertIsInstance(job.execute(), ExceptionInfo) + meta = self.mytask_raising.backend.get_task_meta(tid) self.assertEqual(meta['status'], states.FAILURE) self.assertIsInstance(meta['result'], KeyError) def test_execute_using_pool(self): tid = uuid() - tw = TaskRequest(mytask.name, tid, [4], {'f': 'x'}) + job = self.xRequest({'id': tid, 'args': [4]}) class MockPool(BasePool): target = None @@ -792,60 +903,59 @@ class test_TaskRequest(AppCase): self.kwargs = kwargs p = MockPool() - tw.execute_using_pool(p) + job.execute_using_pool(p) self.assertTrue(p.target) - self.assertEqual(p.args[0], mytask.name) + self.assertEqual(p.args[0], self.mytask.name) self.assertEqual(p.args[1], tid) self.assertEqual(p.args[2], [4]) self.assertIn('f', p.args[3]) self.assertIn([4], p.args) - tw.task.accept_magic_kwargs = False - tw.execute_using_pool(p) + job.task.accept_magic_kwargs = False + job.execute_using_pool(p) def test_default_kwargs(self): + self.maxDiff = 3000 tid = uuid() - tw = TaskRequest(mytask.name, tid, [4], 
{'f': 'x'}) + job = self.xRequest({'id': tid, 'args': [4]}) self.assertDictEqual( - tw.extend_with_default_kwargs(), { + job.extend_with_default_kwargs(), { 'f': 'x', 'logfile': None, 'loglevel': None, - 'task_id': tw.id, + 'task_id': job.id, 'task_retries': 0, 'task_is_eager': False, 'delivery_info': { 'exchange': None, 'routing_key': None, - 'priority': None, + 'priority': 0, + 'redelivered': False, }, - 'task_name': tw.name}) + 'task_name': job.name}) @patch('celery.worker.job.logger') def _test_on_failure(self, exception, logger): - app = app_or_default() + app = self.app tid = uuid() - tw = TaskRequest(mytask.name, tid, [4], {'f': 'x'}) + job = self.xRequest({'id': tid, 'args': [4]}) try: raise exception except Exception: exc_info = ExceptionInfo() app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True - try: - tw.on_failure(exc_info) - self.assertTrue(logger.log.called) - context = logger.log.call_args[0][2] - self.assertEqual(mytask.name, context['name']) - self.assertIn(tid, context['id']) - finally: - app.conf.CELERY_SEND_TASK_ERROR_EMAILS = False + job.on_failure(exc_info) + self.assertTrue(logger.log.called) + context = logger.log.call_args[0][2] + self.assertEqual(self.mytask.name, context['name']) + self.assertIn(tid, context['id']) def test_on_failure(self): self._test_on_failure(Exception('Inside unit tests')) def test_on_failure_unicode_exception(self): - self._test_on_failure(Exception(u'Бобры атакуют')) + self._test_on_failure(Exception('Бобры атакуют')) def test_on_failure_utf8_exception(self): self._test_on_failure(Exception( - from_utf8(u'Бобры атакуют'))) + from_utf8('Бобры атакуют'))) diff --git a/awx/lib/site-packages/celery/tests/worker/test_revoke.py b/awx/lib/site-packages/celery/tests/worker/test_revoke.py index 61c7fb6d44..4d5ad02121 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_revoke.py +++ b/awx/lib/site-packages/celery/tests/worker/test_revoke.py @@ -1,10 +1,10 @@ from __future__ import absolute_import from celery.worker import state -from celery.tests.utils import Case +from celery.tests.case import AppCase -class test_revoked(Case): +class test_revoked(AppCase): def test_is_working(self): state.revoked.add('foo') diff --git a/awx/lib/site-packages/celery/tests/worker/test_state.py b/awx/lib/site-packages/celery/tests/worker/test_state.py index 161db03df8..eb92bb49a6 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_state.py +++ b/awx/lib/site-packages/celery/tests/worker/test_state.py @@ -1,31 +1,29 @@ from __future__ import absolute_import +import pickle + +from time import time + from celery.datastructures import LimitedSet +from celery.exceptions import SystemTerminate from celery.worker import state -from celery.tests.utils import Case + +from celery.tests.case import AppCase, Mock, patch -class StateResetCase(Case): +class StateResetCase(AppCase): - def setUp(self): + def setup(self): self.reset_state() - self.on_setup() - def tearDown(self): + def teardown(self): self.reset_state() - self.on_teardown() def reset_state(self): state.active_requests.clear() state.revoked.clear() state.total_count.clear() - def on_setup(self): - pass - - def on_teardown(self): - pass - class MockShelve(dict): filename = None @@ -47,10 +45,28 @@ class MyPersistent(state.Persistent): storage = MockShelve() +class test_maybe_shutdown(AppCase): + + def teardown(self): + state.should_stop = False + state.should_terminate = False + + def test_should_stop(self): + state.should_stop = True + with self.assertRaises(SystemExit): + state.maybe_shutdown() + + def 
test_should_terminate(self): + state.should_terminate = True + with self.assertRaises(SystemTerminate): + state.maybe_shutdown() + + class test_Persistent(StateResetCase): - def on_setup(self): - self.p = MyPersistent(filename='celery-state') + def setup(self): + self.reset_state() + self.p = MyPersistent(state, filename='celery-state') def test_close_twice(self): self.p._is_open = False @@ -72,19 +88,47 @@ class test_Persistent(StateResetCase): def test_merge(self, data=['foo', 'bar', 'baz']): self.add_revoked(*data) - self.p.merge(self.p.db) + self.p.merge() for item in data: self.assertIn(item, state.revoked) + def test_merge_dict(self): + self.p.clock = Mock() + self.p.clock.adjust.return_value = 626 + d = {'revoked': {'abc': time()}, 'clock': 313} + self.p._merge_with(d) + self.p.clock.adjust.assert_called_with(313) + self.assertEqual(d['clock'], 626) + self.assertIn('abc', state.revoked) + + def test_sync_clock_and_purge(self): + passthrough = Mock() + passthrough.side_effect = lambda x: x + with patch('celery.worker.state.revoked') as revoked: + d = {'clock': 0} + self.p.clock = Mock() + self.p.clock.forward.return_value = 627 + self.p._dumps = passthrough + self.p.compress = passthrough + self.p._sync_with(d) + revoked.purge.assert_called_with() + self.assertEqual(d['clock'], 627) + self.assertNotIn('revoked', d) + self.assertIs(d['zrevoked'], revoked) + def test_sync(self, data1=['foo', 'bar', 'baz'], data2=['baz', 'ini', 'koz']): self.add_revoked(*data1) for item in data2: state.revoked.add(item) - self.p.sync(self.p.db) + self.p.sync() + self.assertTrue(self.p.db['zrevoked']) + pickled = self.p.decompress(self.p.db['zrevoked']) + self.assertTrue(pickled) + saved = pickle.loads(pickled) for item in data2: - self.assertIn(item, self.p.db['revoked']) + self.assertIn(item, saved) class SimpleReq(object): diff --git a/awx/lib/site-packages/celery/tests/worker/test_strategy.py b/awx/lib/site-packages/celery/tests/worker/test_strategy.py new file mode 100644 index 0000000000..7edf78bff2 --- /dev/null +++ b/awx/lib/site-packages/celery/tests/worker/test_strategy.py @@ -0,0 +1,139 @@ +from __future__ import absolute_import + +from collections import defaultdict +from contextlib import contextmanager + +from kombu.utils.limits import TokenBucket + +from celery.worker import state +from celery.utils.timeutils import rate + +from celery.tests.case import AppCase, Mock, patch, body_from_sig + + +class test_default_strategy(AppCase): + + def setup(self): + @self.app.task(shared=False) + def add(x, y): + return x + y + + self.add = add + + class Context(object): + + def __init__(self, sig, s, reserved, consumer, message, body): + self.sig = sig + self.s = s + self.reserved = reserved + self.consumer = consumer + self.message = message + self.body = body + + def __call__(self, **kwargs): + return self.s( + self.message, self.body, + self.message.ack, self.message.reject, [], **kwargs + ) + + def was_reserved(self): + return self.reserved.called + + def was_rate_limited(self): + assert not self.was_reserved() + return self.consumer._limit_task.called + + def was_scheduled(self): + assert not self.was_reserved() + assert not self.was_rate_limited() + return self.consumer.timer.call_at.called + + def event_sent(self): + return self.consumer.event_dispatcher.send.call_args + + def get_request(self): + if self.was_reserved(): + return self.reserved.call_args[0][0] + if self.was_rate_limited(): + return self.consumer._limit_task.call_args[0][0] + if self.was_scheduled(): + return 
self.consumer.timer.call_at.call_args[0][0] + raise ValueError('request not handled') + + @contextmanager + def _context(self, sig, + rate_limits=True, events=True, utc=True, limit=None): + self.assertTrue(sig.type.Strategy) + + reserved = Mock() + consumer = Mock() + consumer.task_buckets = defaultdict(lambda: None) + if limit: + bucket = TokenBucket(rate(limit), capacity=1) + consumer.task_buckets[sig.task] = bucket + consumer.disable_rate_limits = not rate_limits + consumer.event_dispatcher.enabled = events + s = sig.type.start_strategy(self.app, consumer, task_reserved=reserved) + self.assertTrue(s) + + message = Mock() + body = body_from_sig(self.app, sig, utc=utc) + + yield self.Context(sig, s, reserved, consumer, message, body) + + def test_when_logging_disabled(self): + with patch('celery.worker.strategy.logger') as logger: + logger.isEnabledFor.return_value = False + with self._context(self.add.s(2, 2)) as C: + C() + self.assertFalse(logger.info.called) + + def test_task_strategy(self): + with self._context(self.add.s(2, 2)) as C: + C() + self.assertTrue(C.was_reserved()) + req = C.get_request() + C.consumer.on_task_request.assert_called_with(req) + self.assertTrue(C.event_sent()) + + def test_when_events_disabled(self): + with self._context(self.add.s(2, 2), events=False) as C: + C() + self.assertTrue(C.was_reserved()) + self.assertFalse(C.event_sent()) + + def test_eta_task(self): + with self._context(self.add.s(2, 2).set(countdown=10)) as C: + C() + self.assertTrue(C.was_scheduled()) + C.consumer.qos.increment_eventually.assert_called_with() + + def test_eta_task_utc_disabled(self): + with self._context(self.add.s(2, 2).set(countdown=10), utc=False) as C: + C() + self.assertTrue(C.was_scheduled()) + C.consumer.qos.increment_eventually.assert_called_with() + + def test_when_rate_limited(self): + task = self.add.s(2, 2) + with self._context(task, rate_limits=True, limit='1/m') as C: + C() + self.assertTrue(C.was_rate_limited()) + + def test_when_rate_limited__limits_disabled(self): + task = self.add.s(2, 2) + with self._context(task, rate_limits=False, limit='1/m') as C: + C() + self.assertTrue(C.was_reserved()) + + def test_when_revoked(self): + task = self.add.s(2, 2) + task.freeze() + state.revoked.add(task.id) + try: + with self._context(task) as C: + C() + with self.assertRaises(ValueError): + C.get_request() + finally: + state.revoked.discard(task.id) diff --git a/awx/lib/site-packages/celery/tests/worker/test_worker.py b/awx/lib/site-packages/celery/tests/worker/test_worker.py index 044f033073..db773d71fc 100644 --- a/awx/lib/site-packages/celery/tests/worker/test_worker.py +++ b/awx/lib/site-packages/celery/tests/worker/test_worker.py @@ -1,56 +1,78 @@ from __future__ import absolute_import -from __future__ import with_statement +import os import socket -import sys from collections import deque from datetime import datetime, timedelta -from Queue import Empty +from threading import Event -from billiard.exceptions import WorkerLostError +from amqp import ChannelError from kombu import Connection -from kombu.exceptions import StdChannelError +from kombu.common import QoS, ignore_errors from kombu.transport.base import Message -from mock import Mock, patch -from nose import SkipTest -from celery import current_app from celery.app.defaults import DEFAULTS +from celery.bootsteps import RUN, CLOSE, StartStopStep from celery.concurrency.base import BasePool from celery.datastructures import AttributeDict -from celery.exceptions import SystemTerminate -from celery.task import 
task as task_dec -from celery.task import periodic_task as periodic_task_dec +from celery.exceptions import SystemTerminate, TaskRevokedError +from celery.five import Empty, range, Queue as FastQueue from celery.utils import uuid -from celery.worker import WorkController, Queues, Timers, EvLoop, Pool -from celery.worker.buckets import FastQueue, AsyncTaskBucket +from celery.worker import components +from celery.worker import consumer +from celery.worker.consumer import Consumer as __Consumer from celery.worker.job import Request -from celery.worker.consumer import BlockingConsumer -from celery.worker.consumer import QoS, RUN, PREFETCH_COUNT_MAX, CLOSE +from celery.utils import worker_direct from celery.utils.serialization import pickle from celery.utils.timer2 import Timer -from celery.utils.threads import Event -from celery.tests.utils import AppCase, Case +from celery.tests.case import AppCase, Mock, patch, restore_logging + + +def MockStep(step=None): + step = Mock() if step is None else step + step.blueprint = Mock() + step.blueprint.name = 'MockNS' + step.name = 'MockStep(%s)' % (id(step), ) + return step class PlaceHolder(object): pass -class MyKombuConsumer(BlockingConsumer): +def find_step(obj, typ): + return obj.blueprint.steps[typ.name] + + +class Consumer(__Consumer): + + def __init__(self, *args, **kwargs): + kwargs.setdefault('without_mingle', True) # disable Mingle step + kwargs.setdefault('without_gossip', True) # disable Gossip step + kwargs.setdefault('without_heartbeat', True) # disable Heart step + super(Consumer, self).__init__(*args, **kwargs) + + +class _MyKombuConsumer(Consumer): broadcast_consumer = Mock() task_consumer = Mock() def __init__(self, *args, **kwargs): kwargs.setdefault('pool', BasePool(2)) - super(MyKombuConsumer, self).__init__(*args, **kwargs) + super(_MyKombuConsumer, self).__init__(*args, **kwargs) def restart_heartbeat(self): self.heart = None +class MyKombuConsumer(Consumer): + + def loop(self, *args, **kwargs): + pass + + class MockNode(object): commands = [] @@ -81,237 +103,147 @@ class MockHeart(object): self.closed = True -@task_dec() -def foo_task(x, y, z, **kwargs): - return x * y * z - - -@periodic_task_dec(run_every=60) -def foo_periodic_task(): - return 'foo' - - def create_message(channel, **data): data.setdefault('id', uuid()) channel.no_ack_consumers = set() - return Message(channel, body=pickle.dumps(dict(**data)), - content_type='application/x-python-serialize', - content_encoding='binary', - delivery_info={'consumer_tag': 'mock'}) + m = Message(channel, body=pickle.dumps(dict(**data)), + content_type='application/x-python-serialize', + content_encoding='binary', + delivery_info={'consumer_tag': 'mock'}) + m.accept = ['application/x-python-serialize'] + return m -class test_QoS(Case): +class test_Consumer(AppCase): - class _QoS(QoS): - def __init__(self, value): - self.value = value - QoS.__init__(self, None, value) - - def set(self, value): - return value - - def test_qos_increment_decrement(self): - qos = self._QoS(10) - self.assertEqual(qos.increment_eventually(), 11) - self.assertEqual(qos.increment_eventually(3), 14) - self.assertEqual(qos.increment_eventually(-30), 14) - self.assertEqual(qos.decrement_eventually(7), 7) - self.assertEqual(qos.decrement_eventually(), 6) - - def test_qos_disabled_increment_decrement(self): - qos = self._QoS(0) - self.assertEqual(qos.increment_eventually(), 0) - self.assertEqual(qos.increment_eventually(3), 0) - self.assertEqual(qos.increment_eventually(-30), 0) - 
self.assertEqual(qos.decrement_eventually(7), 0) - self.assertEqual(qos.decrement_eventually(), 0) - self.assertEqual(qos.decrement_eventually(10), 0) - - def test_qos_thread_safe(self): - qos = self._QoS(10) - - def add(): - for i in xrange(1000): - qos.increment_eventually() - - def sub(): - for i in xrange(1000): - qos.decrement_eventually() - - def threaded(funs): - from threading import Thread - threads = [Thread(target=fun) for fun in funs] - for thread in threads: - thread.start() - for thread in threads: - thread.join() - - threaded([add, add]) - self.assertEqual(qos.value, 2010) - - qos.value = 1000 - threaded([add, sub]) # n = 2 - self.assertEqual(qos.value, 1000) - - def test_exceeds_short(self): - qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1) - qos.update() - self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1) - qos.increment_eventually() - self.assertEqual(qos.value, PREFETCH_COUNT_MAX) - qos.increment_eventually() - self.assertEqual(qos.value, PREFETCH_COUNT_MAX + 1) - qos.decrement_eventually() - self.assertEqual(qos.value, PREFETCH_COUNT_MAX) - qos.decrement_eventually() - self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1) - - def test_consumer_increment_decrement(self): - consumer = Mock() - qos = QoS(consumer, 10) - qos.update() - self.assertEqual(qos.value, 10) - consumer.qos.assert_called_with(prefetch_count=10) - qos.decrement_eventually() - qos.update() - self.assertEqual(qos.value, 9) - consumer.qos.assert_called_with(prefetch_count=9) - qos.decrement_eventually() - self.assertEqual(qos.value, 8) - consumer.qos.assert_called_with(prefetch_count=9) - self.assertIn({'prefetch_count': 9}, consumer.qos.call_args) - - # Does not decrement 0 value - qos.value = 0 - qos.decrement_eventually() - self.assertEqual(qos.value, 0) - qos.increment_eventually() - self.assertEqual(qos.value, 0) - - def test_consumer_decrement_eventually(self): - consumer = Mock() - qos = QoS(consumer, 10) - qos.decrement_eventually() - self.assertEqual(qos.value, 9) - qos.value = 0 - qos.decrement_eventually() - self.assertEqual(qos.value, 0) - - def test_set(self): - consumer = Mock() - qos = QoS(consumer, 10) - qos.set(12) - self.assertEqual(qos.prev, 12) - qos.set(qos.prev) - - -class test_Consumer(Case): - - def setUp(self): - self.ready_queue = FastQueue() + def setup(self): + self.buffer = FastQueue() self.timer = Timer() - def tearDown(self): + @self.app.task(shared=False) + def foo_task(x, y, z): + return x * y * z + self.foo_task = foo_task + + def teardown(self): self.timer.stop() def test_info(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - l.qos = QoS(l.task_consumer, 10) - info = l.info + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.task_consumer = Mock() + l.qos = QoS(l.task_consumer.qos, 10) + l.connection = Mock() + l.connection.info.return_value = {'foo': 'bar'} + l.controller = l.app.WorkController() + l.controller.pool = Mock() + l.controller.pool.info.return_value = [Mock(), Mock()] + l.controller.consumer = l + info = l.controller.stats() self.assertEqual(info['prefetch_count'], 10) - self.assertFalse(info['broker']) - - l.connection = current_app.connection() - info = l.info self.assertTrue(info['broker']) def test_start_when_closed(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - l._state = CLOSE + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = CLOSE l.start() def test_connection(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) + l = 
MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) - l.reset_connection() + l.blueprint.start(l) self.assertIsInstance(l.connection, Connection) - l._state = RUN + l.blueprint.state = RUN l.event_dispatcher = None - l.stop_consumers(close_connection=False) + l.blueprint.restart(l) self.assertTrue(l.connection) - l._state = RUN - l.stop_consumers() + l.blueprint.state = RUN + l.shutdown() self.assertIsNone(l.connection) self.assertIsNone(l.task_consumer) - l.reset_connection() + l.blueprint.start(l) self.assertIsInstance(l.connection, Connection) - l.stop_consumers() + l.blueprint.restart(l) l.stop() - l.close_connection() + l.shutdown() self.assertIsNone(l.connection) self.assertIsNone(l.task_consumer) def test_close_connection(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - l._state = RUN - l.close_connection() + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + step = find_step(l, consumer.Connection) + conn = l.connection = Mock() + step.shutdown(l) + self.assertTrue(conn.close.called) + self.assertIsNone(l.connection) - l = MyKombuConsumer(self.ready_queue, timer=self.timer) + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) eventer = l.event_dispatcher = Mock() eventer.enabled = True heart = l.heart = MockHeart() - l._state = RUN - l.stop_consumers() + l.blueprint.state = RUN + Events = find_step(l, consumer.Events) + Events.shutdown(l) + Heart = find_step(l, consumer.Heart) + Heart.shutdown(l) self.assertTrue(eventer.close.call_count) self.assertTrue(heart.closed) @patch('celery.worker.consumer.warn') def test_receive_message_unknown(self, warn): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.steps.pop() backend = Mock() m = create_message(backend, unknown={'baz': '!!!'}) l.event_dispatcher = Mock() - l.pidbox_node = MockNode() + l.node = MockNode() - l.receive_message(m.decode(), m) + callback = self._get_on_message(l) + callback(m.decode(), m) self.assertTrue(warn.call_count) - @patch('celery.worker.consumer.to_timestamp') + @patch('celery.worker.strategy.to_timestamp') def test_receive_message_eta_OverflowError(self, to_timestamp): to_timestamp.side_effect = OverflowError() - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - m = create_message(Mock(), task=foo_task.name, + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.steps.pop() + m = create_message(Mock(), task=self.foo_task.name, args=('2, 2'), kwargs={}, eta=datetime.now().isoformat()) l.event_dispatcher = Mock() - l.pidbox_node = MockNode() + l.node = MockNode() l.update_strategies() l.qos = Mock() - l.receive_message(m.decode(), m) - self.assertTrue(to_timestamp.called) + callback = self._get_on_message(l) + callback(m.decode(), m) self.assertTrue(m.acknowledged) @patch('celery.worker.consumer.error') def test_receive_message_InvalidTaskError(self, error): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - m = create_message(Mock(), task=foo_task.name, + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.event_dispatcher = Mock() + l.steps.pop() + m = create_message(Mock(), task=self.foo_task.name, args=(1, 2), kwargs='foobarbaz', id=1) l.update_strategies() l.event_dispatcher = Mock() - l.pidbox_node = MockNode() - l.receive_message(m.decode(), m) + callback = self._get_on_message(l) + 
callback(m.decode(), m) self.assertIn('Received invalid task message', error.call_args[0][0]) @patch('celery.worker.consumer.crit') def test_on_decode_error(self, crit): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) class MockMessage(Mock): content_type = 'application/x-msgpack' @@ -323,118 +255,128 @@ class test_Consumer(Case): self.assertTrue(message.ack.call_count) self.assertIn("Can't decode message body", crit.call_args[0][0]) + def _get_on_message(self, l): + if l.qos is None: + l.qos = Mock() + l.event_dispatcher = Mock() + l.task_consumer = Mock() + l.connection = Mock() + l.connection.drain_events.side_effect = SystemExit() + + with self.assertRaises(SystemExit): + l.loop(*l.loop_args()) + self.assertTrue(l.task_consumer.register_callback.called) + return l.task_consumer.register_callback.call_args[0][0] + def test_receieve_message(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - m = create_message(Mock(), task=foo_task.name, + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.event_dispatcher = Mock() + m = create_message(Mock(), task=self.foo_task.name, args=[2, 4, 8], kwargs={}) l.update_strategies() + callback = self._get_on_message(l) + callback(m.decode(), m) - l.event_dispatcher = Mock() - l.receive_message(m.decode(), m) - - in_bucket = self.ready_queue.get_nowait() + in_bucket = self.buffer.get_nowait() self.assertIsInstance(in_bucket, Request) - self.assertEqual(in_bucket.name, foo_task.name) + self.assertEqual(in_bucket.name, self.foo_task.name) self.assertEqual(in_bucket.execute(), 2 * 4 * 8) self.assertTrue(self.timer.empty()) + def test_start_channel_error(self): + + class MockConsumer(Consumer): + iterations = 0 + + def loop(self, *args, **kwargs): + if not self.iterations: + self.iterations = 1 + raise KeyError('foo') + raise SyntaxError('bar') + + l = MockConsumer(self.buffer.put, timer=self.timer, + send_events=False, pool=BasePool(), app=self.app) + l.channel_errors = (KeyError, ) + with self.assertRaises(KeyError): + l.start() + l.timer.stop() + def test_start_connection_error(self): - class MockConsumer(BlockingConsumer): + class MockConsumer(Consumer): iterations = 0 - def consume_messages(self): + def loop(self, *args, **kwargs): if not self.iterations: self.iterations = 1 raise KeyError('foo') raise SyntaxError('bar') - l = MockConsumer(self.ready_queue, timer=self.timer, - send_events=False, pool=BasePool()) + l = MockConsumer(self.buffer.put, timer=self.timer, + send_events=False, pool=BasePool(), app=self.app) + l.connection_errors = (KeyError, ) - with self.assertRaises(SyntaxError): - l.start() - l.heart.stop() - l.timer.stop() - - def test_start_channel_error(self): - # Regression test for AMQPChannelExceptions that can occur within the - # consumer. (i.e. 
404 errors) - - class MockConsumer(BlockingConsumer): - iterations = 0 - - def consume_messages(self): - if not self.iterations: - self.iterations = 1 - raise KeyError('foo') - raise SyntaxError('bar') - - l = MockConsumer(self.ready_queue, timer=self.timer, - send_events=False, pool=BasePool()) - - l.channel_errors = (KeyError, ) self.assertRaises(SyntaxError, l.start) - l.heart.stop() l.timer.stop() - def test_consume_messages_ignores_socket_timeout(self): + def test_loop_ignores_socket_timeout(self): - class Connection(current_app.connection().__class__): + class Connection(self.app.connection().__class__): obj = None def drain_events(self, **kwargs): self.obj.connection = None raise socket.timeout(10) - l = MyKombuConsumer(self.ready_queue, timer=self.timer) + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) l.connection = Connection() l.task_consumer = Mock() l.connection.obj = l - l.qos = QoS(l.task_consumer, 10) - l.consume_messages() + l.qos = QoS(l.task_consumer.qos, 10) + l.loop(*l.loop_args()) - def test_consume_messages_when_socket_error(self): + def test_loop_when_socket_error(self): - class Connection(current_app.connection().__class__): + class Connection(self.app.connection().__class__): obj = None def drain_events(self, **kwargs): self.obj.connection = None raise socket.error('foo') - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - l._state = RUN + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN c = l.connection = Connection() l.connection.obj = l l.task_consumer = Mock() - l.qos = QoS(l.task_consumer, 10) + l.qos = QoS(l.task_consumer.qos, 10) with self.assertRaises(socket.error): - l.consume_messages() + l.loop(*l.loop_args()) - l._state = CLOSE + l.blueprint.state = CLOSE l.connection = c - l.consume_messages() + l.loop(*l.loop_args()) - def test_consume_messages(self): + def test_loop(self): - class Connection(current_app.connection().__class__): + class Connection(self.app.connection().__class__): obj = None def drain_events(self, **kwargs): self.obj.connection = None - l = MyKombuConsumer(self.ready_queue, timer=self.timer) + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN l.connection = Connection() l.connection.obj = l l.task_consumer = Mock() - l.qos = QoS(l.task_consumer, 10) + l.qos = QoS(l.task_consumer.qos, 10) - l.consume_messages() - l.consume_messages() + l.loop(*l.loop_args()) + l.loop(*l.loop_args()) self.assertTrue(l.task_consumer.consume.call_count) l.task_consumer.qos.assert_called_with(prefetch_count=10) - l.task_consumer.qos = Mock() self.assertEqual(l.qos.value, 10) l.qos.decrement_eventually() self.assertEqual(l.qos.value, 9) @@ -442,19 +384,19 @@ class test_Consumer(Case): self.assertEqual(l.qos.value, 9) l.task_consumer.qos.assert_called_with(prefetch_count=9) - def test_maybe_conn_error(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - l.connection_errors = (KeyError, ) + def test_ignore_errors(self): + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.connection_errors = (AttributeError, KeyError, ) l.channel_errors = (SyntaxError, ) - l.maybe_conn_error(Mock(side_effect=AttributeError('foo'))) - l.maybe_conn_error(Mock(side_effect=KeyError('foo'))) - l.maybe_conn_error(Mock(side_effect=SyntaxError('foo'))) + ignore_errors(l, Mock(side_effect=AttributeError('foo'))) + ignore_errors(l, Mock(side_effect=KeyError('foo'))) + ignore_errors(l, Mock(side_effect=SyntaxError('foo'))) with 
self.assertRaises(IndexError): - l.maybe_conn_error(Mock(side_effect=IndexError('foo'))) + ignore_errors(l, Mock(side_effect=IndexError('foo'))) def test_apply_eta_task(self): from celery.worker import state - l = MyKombuConsumer(self.ready_queue, timer=self.timer) + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) l.qos = QoS(None, 10) task = object() @@ -462,82 +404,92 @@ class test_Consumer(Case): l.apply_eta_task(task) self.assertIn(task, state.reserved_requests) self.assertEqual(l.qos.value, qos - 1) - self.assertIs(self.ready_queue.get_nowait(), task) + self.assertIs(self.buffer.get_nowait(), task) def test_receieve_message_eta_isoformat(self): - if sys.version_info < (2, 6): - raise SkipTest('test broken on Python 2.5') - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - m = create_message(Mock(), task=foo_task.name, - eta=datetime.now().isoformat(), - args=[2, 4, 8], kwargs={}) + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.steps.pop() + m = create_message( + Mock(), task=self.foo_task.name, + eta=(datetime.now() + timedelta(days=1)).isoformat(), + args=[2, 4, 8], kwargs={}, + ) l.task_consumer = Mock() - l.qos = QoS(l.task_consumer, l.initial_prefetch_count) + l.qos = QoS(l.task_consumer.qos, 1) current_pcount = l.qos.value l.event_dispatcher = Mock() l.enabled = False l.update_strategies() - l.receive_message(m.decode(), m) + callback = self._get_on_message(l) + callback(m.decode(), m) l.timer.stop() l.timer.join(1) items = [entry[2] for entry in self.timer.queue] found = 0 for item in items: - if item.args[0].name == foo_task.name: + if item.args[0].name == self.foo_task.name: found = True self.assertTrue(found) self.assertGreater(l.qos.value, current_pcount) l.timer.stop() - def test_on_control(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - l.pidbox_node = Mock() - l.reset_pidbox_node = Mock() + def test_pidbox_callback(self): + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + con = find_step(l, consumer.Control).box + con.node = Mock() + con.reset = Mock() - l.on_control('foo', 'bar') - l.pidbox_node.handle_message.assert_called_with('foo', 'bar') + con.on_message('foo', 'bar') + con.node.handle_message.assert_called_with('foo', 'bar') - l.pidbox_node = Mock() - l.pidbox_node.handle_message.side_effect = KeyError('foo') - l.on_control('foo', 'bar') - l.pidbox_node.handle_message.assert_called_with('foo', 'bar') + con.node = Mock() + con.node.handle_message.side_effect = KeyError('foo') + con.on_message('foo', 'bar') + con.node.handle_message.assert_called_with('foo', 'bar') - l.pidbox_node = Mock() - l.pidbox_node.handle_message.side_effect = ValueError('foo') - l.on_control('foo', 'bar') - l.pidbox_node.handle_message.assert_called_with('foo', 'bar') - l.reset_pidbox_node.assert_called_with() + con.node = Mock() + con.node.handle_message.side_effect = ValueError('foo') + con.on_message('foo', 'bar') + con.node.handle_message.assert_called_with('foo', 'bar') + self.assertTrue(con.reset.called) def test_revoke(self): - ready_queue = FastQueue() - l = MyKombuConsumer(ready_queue, timer=self.timer) + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.steps.pop() backend = Mock() id = uuid() - t = create_message(backend, task=foo_task.name, args=[2, 4, 8], + t = create_message(backend, task=self.foo_task.name, args=[2, 4, 8], kwargs={}, id=id) from celery.worker.state import revoked revoked.add(id) - 
l.receive_message(t.decode(), t) - self.assertTrue(ready_queue.empty()) + callback = self._get_on_message(l) + callback(t.decode(), t) + self.assertTrue(self.buffer.empty()) def test_receieve_message_not_registered(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.steps.pop() backend = Mock() m = create_message(backend, task='x.X.31x', args=[2, 4, 8], kwargs={}) l.event_dispatcher = Mock() - self.assertFalse(l.receive_message(m.decode(), m)) + callback = self._get_on_message(l) + self.assertFalse(callback(m.decode(), m)) with self.assertRaises(Empty): - self.ready_queue.get_nowait() + self.buffer.get_nowait() self.assertTrue(self.timer.empty()) @patch('celery.worker.consumer.warn') @patch('celery.worker.consumer.logger') def test_receieve_message_ack_raises(self, logger, warn): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN backend = Mock() m = create_message(backend, args=[2, 4, 8], kwargs={}) @@ -545,66 +497,82 @@ class test_Consumer(Case): l.connection_errors = (socket.error, ) m.reject = Mock() m.reject.side_effect = socket.error('foo') - self.assertFalse(l.receive_message(m.decode(), m)) + callback = self._get_on_message(l) + self.assertFalse(callback(m.decode(), m)) self.assertTrue(warn.call_count) with self.assertRaises(Empty): - self.ready_queue.get_nowait() + self.buffer.get_nowait() self.assertTrue(self.timer.empty()) - m.reject.assert_called_with() + m.reject.assert_called_with(requeue=False) self.assertTrue(logger.critical.call_count) - def test_receieve_message_eta(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) + def test_receive_message_eta(self): + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.steps.pop() l.event_dispatcher = Mock() l.event_dispatcher._outbound_buffer = deque() backend = Mock() m = create_message( - backend, task=foo_task.name, + backend, task=self.foo_task.name, args=[2, 4, 8], kwargs={}, eta=(datetime.now() + timedelta(days=1)).isoformat(), ) - l.reset_connection() - p = l.app.conf.BROKER_CONNECTION_RETRY - l.app.conf.BROKER_CONNECTION_RETRY = False try: - l.reset_connection() - finally: + l.blueprint.start(l) + p = l.app.conf.BROKER_CONNECTION_RETRY + l.app.conf.BROKER_CONNECTION_RETRY = False + l.blueprint.start(l) l.app.conf.BROKER_CONNECTION_RETRY = p - l.stop_consumers() - l.event_dispatcher = Mock() - l.receive_message(m.decode(), m) - l.timer.stop() + l.blueprint.restart(l) + l.event_dispatcher = Mock() + callback = self._get_on_message(l) + callback(m.decode(), m) + finally: + l.timer.stop() + l.timer.join() + in_hold = l.timer.queue[0] self.assertEqual(len(in_hold), 3) eta, priority, entry = in_hold task = entry.args[0] self.assertIsInstance(task, Request) - self.assertEqual(task.name, foo_task.name) + self.assertEqual(task.name, self.foo_task.name) self.assertEqual(task.execute(), 2 * 4 * 8) with self.assertRaises(Empty): - self.ready_queue.get_nowait() + self.buffer.get_nowait() def test_reset_pidbox_node(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - l.pidbox_node = Mock() - chan = l.pidbox_node.channel = Mock() + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + con = find_step(l, consumer.Control).box + con.node = Mock() + chan = con.node.channel = Mock() l.connection = Mock() chan.close.side_effect = socket.error('foo') l.connection_errors = 
(socket.error, ) - l.reset_pidbox_node() + con.reset() chan.close.assert_called_with() def test_reset_pidbox_node_green(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - l.pool = Mock() - l.pool.is_green = True - l.reset_pidbox_node() - l.pool.spawn_n.assert_called_with(l._green_pidbox_node) + from celery.worker.pidbox import gPidbox + pool = Mock() + pool.is_green = True + l = MyKombuConsumer(self.buffer.put, timer=self.timer, pool=pool, + app=self.app) + con = find_step(l, consumer.Control) + self.assertIsInstance(con.box, gPidbox) + con.start(l) + l.pool.spawn_n.assert_called_with( + con.box.loop, l, + ) def test__green_pidbox_node(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - l.pidbox_node = Mock() + pool = Mock() + pool.is_green = True + l = MyKombuConsumer(self.buffer.put, timer=self.timer, pool=pool, + app=self.app) + l.node = Mock() + controller = find_step(l, consumer.Control) class BConsumer(Mock): @@ -615,7 +583,7 @@ class test_Consumer(Case): def __exit__(self, *exc_info): self.cancel() - l.pidbox_node.listen = BConsumer() + controller.box.node.listen = BConsumer() connections = [] class Connection(object): @@ -644,45 +612,47 @@ class test_Consumer(Case): self.calls += 1 raise socket.timeout() self.obj.connection = None - self.obj._pidbox_node_shutdown.set() + controller.box._node_shutdown.set() def close(self): self.closed = True l.connection = Mock() - l._open_connection = lambda: Connection(obj=l) - l._green_pidbox_node() + l.connect = lambda: Connection(obj=l) + controller = find_step(l, consumer.Control) + controller.box.loop(l) - l.pidbox_node.listen.assert_called_with(callback=l.on_control) - self.assertTrue(l.broadcast_consumer) - l.broadcast_consumer.consume.assert_called_with() + self.assertTrue(controller.box.node.listen.called) + self.assertTrue(controller.box.consumer) + controller.box.consumer.consume.assert_called_with() self.assertIsNone(l.connection) self.assertTrue(connections[0].closed) @patch('kombu.connection.Connection._establish_connection') @patch('kombu.utils.sleep') - def test_open_connection_errback(self, sleep, connect): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) + def test_connect_errback(self, sleep, connect): + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) from kombu.transport.memory import Transport - Transport.connection_errors = (StdChannelError, ) + Transport.connection_errors = (ChannelError, ) def effect(): if connect.call_count > 1: return - raise StdChannelError() + raise ChannelError('error') connect.side_effect = effect - l._open_connection() + l.connect() connect.assert_called_with() def test_stop_pidbox_node(self): - l = MyKombuConsumer(self.ready_queue, timer=self.timer) - l._pidbox_node_stopped = Event() - l._pidbox_node_shutdown = Event() - l._pidbox_node_stopped.set() - l.stop_pidbox_node() + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + cont = find_step(l, consumer.Control) + cont._node_stopped = Event() + cont._node_shutdown = Event() + cont._node_stopped.set() + cont.stop(l) - def test_start__consume_messages(self): + def test_start__loop(self): class _QoS(object): prev = 3 @@ -699,61 +669,44 @@ class test_Consumer(Case): raise KeyError('foo') init_callback = Mock() - l = _Consumer(self.ready_queue, timer=self.timer, - init_callback=init_callback) + l = _Consumer(self.buffer.put, timer=self.timer, + init_callback=init_callback, app=self.app) l.task_consumer = Mock() l.broadcast_consumer = Mock() l.qos = _QoS() l.connection = 
Connection() l.iterations = 0 - def raises_KeyError(limit=None): + def raises_KeyError(*args, **kwargs): l.iterations += 1 if l.qos.prev != l.qos.value: l.qos.update() if l.iterations >= 2: raise KeyError('foo') - l.consume_messages = raises_KeyError + l.loop = raises_KeyError with self.assertRaises(KeyError): l.start() - self.assertTrue(init_callback.call_count) - self.assertEqual(l.iterations, 1) + self.assertEqual(l.iterations, 2) self.assertEqual(l.qos.prev, l.qos.value) init_callback.reset_mock() - l = _Consumer(self.ready_queue, timer=self.timer, + l = _Consumer(self.buffer.put, timer=self.timer, app=self.app, send_events=False, init_callback=init_callback) l.qos = _QoS() l.task_consumer = Mock() l.broadcast_consumer = Mock() l.connection = Connection() - l.consume_messages = Mock(side_effect=socket.error('foo')) + l.loop = Mock(side_effect=socket.error('foo')) with self.assertRaises(socket.error): l.start() - self.assertTrue(init_callback.call_count) - self.assertTrue(l.consume_messages.call_count) + self.assertTrue(l.loop.call_count) def test_reset_connection_with_no_node(self): - l = BlockingConsumer(self.ready_queue, timer=self.timer) + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) + l.steps.pop() self.assertEqual(None, l.pool) - l.reset_connection() - - def test_on_task_revoked(self): - l = BlockingConsumer(self.ready_queue, timer=self.timer) - task = Mock() - task.revoked.return_value = True - l.on_task(task) - - def test_on_task_no_events(self): - l = BlockingConsumer(self.ready_queue, timer=self.timer) - task = Mock() - task.revoked.return_value = False - l.event_dispatcher = Mock() - l.event_dispatcher.enabled = False - task.eta = None - l._does_info = False - l.on_task(task) + l.blueprint.start(l) class test_WorkController(AppCase): @@ -762,22 +715,73 @@ class test_WorkController(AppCase): self.worker = self.create_worker() from celery import worker self._logger = worker.logger + self._comp_logger = components.logger self.logger = worker.logger = Mock() + self.comp_logger = components.logger = Mock() + + @self.app.task(shared=False) + def foo_task(x, y, z): + return x * y * z + self.foo_task = foo_task def teardown(self): from celery import worker worker.logger = self._logger + components.logger = self._comp_logger def create_worker(self, **kw): worker = self.app.WorkController(concurrency=1, loglevel=0, **kw) - worker._shutdown_complete.set() + worker.blueprint.shutdown_complete.set() return worker + def test_on_consumer_ready(self): + self.worker.on_consumer_ready(Mock()) + + def test_setup_queues_worker_direct(self): + self.app.conf.CELERY_WORKER_DIRECT = True + self.app.amqp.__dict__['queues'] = Mock() + self.worker.setup_queues({}) + self.app.amqp.queues.select_add.assert_called_with( + worker_direct(self.worker.hostname), + ) + + def test_send_worker_shutdown(self): + with patch('celery.signals.worker_shutdown') as ws: + self.worker._send_worker_shutdown() + ws.send.assert_called_with(sender=self.worker) + + def test_process_shutdown_on_worker_shutdown(self): + from celery.concurrency.prefork import process_destructor + from celery.concurrency.asynpool import Worker + with patch('celery.signals.worker_process_shutdown') as ws: + Worker._make_shortcuts = Mock() + with patch('os._exit') as _exit: + worker = Worker(None, None, on_exit=process_destructor) + worker._do_exit(22, 3.1415926) + ws.send.assert_called_with( + sender=None, pid=22, exitcode=3.1415926, + ) + _exit.assert_called_with(3.1415926) + + def 
test_process_task_revoked_release_semaphore(self): + self.worker._quick_release = Mock() + req = Mock() + req.execute_using_pool.side_effect = TaskRevokedError + self.worker._process_task(req) + self.worker._quick_release.assert_called_with() + + delattr(self.worker, '_quick_release') + self.worker._process_task(req) + + def test_shutdown_no_blueprint(self): + self.worker.blueprint = None + self.worker._shutdown() + @patch('celery.platforms.create_pidlock') def test_use_pidfile(self, create_pidlock): create_pidlock.return_value = Mock() worker = self.create_worker(pidfile='pidfilelockfilepid') - worker.components = [] + worker.steps = [] worker.start() self.assertTrue(create_pidlock.called) worker.stop() @@ -786,51 +790,56 @@ class test_WorkController(AppCase): @patch('celery.platforms.signals') @patch('celery.platforms.set_mp_process_title') def test_process_initializer(self, set_mp_process_title, _signals): - from celery import Celery - from celery import signals - from celery._state import _tls - from celery.concurrency.processes import process_initializer - from celery.concurrency.processes import (WORKER_SIGRESET, - WORKER_SIGIGNORE) + with restore_logging(): + from celery import signals + from celery._state import _tls + from celery.concurrency.prefork import ( + process_initializer, WORKER_SIGRESET, WORKER_SIGIGNORE, + ) - def on_worker_process_init(**kwargs): - on_worker_process_init.called = True - on_worker_process_init.called = False - signals.worker_process_init.connect(on_worker_process_init) + def on_worker_process_init(**kwargs): + on_worker_process_init.called = True + on_worker_process_init.called = False + signals.worker_process_init.connect(on_worker_process_init) - loader = Mock() - loader.override_backends = {} - app = Celery(loader=loader, set_as_current=False) - app.loader = loader - app.conf = AttributeDict(DEFAULTS) - process_initializer(app, 'awesome.worker.com') - _signals.ignore.assert_any_call(*WORKER_SIGIGNORE) - _signals.reset.assert_any_call(*WORKER_SIGRESET) - self.assertTrue(app.loader.init_worker.call_count) - self.assertTrue(on_worker_process_init.called) - self.assertIs(_tls.current_app, app) - set_mp_process_title.assert_called_with( - 'celeryd', hostname='awesome.worker.com', - ) + def Loader(*args, **kwargs): + loader = Mock(*args, **kwargs) + loader.conf = {} + loader.override_backends = {} + return loader - def test_with_rate_limits_disabled(self): - worker = WorkController(concurrency=1, loglevel=0, - disable_rate_limits=True) - self.assertTrue(hasattr(worker.ready_queue, 'put')) + with self.Celery(loader=Loader) as app: + app.conf = AttributeDict(DEFAULTS) + process_initializer(app, 'awesome.worker.com') + _signals.ignore.assert_any_call(*WORKER_SIGIGNORE) + _signals.reset.assert_any_call(*WORKER_SIGRESET) + self.assertTrue(app.loader.init_worker.call_count) + self.assertTrue(on_worker_process_init.called) + self.assertIs(_tls.current_app, app) + set_mp_process_title.assert_called_with( + 'celeryd', hostname='awesome.worker.com', + ) + + with patch('celery.app.trace.setup_worker_optimizations') as S: + os.environ['FORKED_BY_MULTIPROCESSING'] = "1" + try: + process_initializer(app, 'luke.worker.com') + S.assert_called_with(app) + finally: + os.environ.pop('FORKED_BY_MULTIPROCESSING', None) def test_attrs(self): worker = self.worker + self.assertIsNotNone(worker.timer) self.assertIsInstance(worker.timer, Timer) - self.assertTrue(worker.timer) - self.assertTrue(worker.pool) - self.assertTrue(worker.consumer) - self.assertTrue(worker.mediator) - 
self.assertTrue(worker.components) + self.assertIsNotNone(worker.pool) + self.assertIsNotNone(worker.consumer) + self.assertTrue(worker.steps) - def test_with_embedded_celerybeat(self): - worker = WorkController(concurrency=1, loglevel=0, beat=True) + def test_with_embedded_beat(self): + worker = self.app.WorkController(concurrency=1, loglevel=0, beat=True) self.assertTrue(worker.beat) - self.assertIn(worker.beat, worker.components) + self.assertIn(worker.beat, [w.obj for w in worker.steps]) def test_with_autoscaler(self): worker = self.create_worker( @@ -840,37 +849,37 @@ class test_WorkController(AppCase): self.assertTrue(worker.autoscaler) def test_dont_stop_or_terminate(self): - worker = WorkController(concurrency=1, loglevel=0) + worker = self.app.WorkController(concurrency=1, loglevel=0) worker.stop() - self.assertNotEqual(worker._state, worker.CLOSE) + self.assertNotEqual(worker.blueprint.state, CLOSE) worker.terminate() - self.assertNotEqual(worker._state, worker.CLOSE) + self.assertNotEqual(worker.blueprint.state, CLOSE) sigsafe, worker.pool.signal_safe = worker.pool.signal_safe, False try: - worker._state = worker.RUN + worker.blueprint.state = RUN worker.stop(in_sighandler=True) - self.assertNotEqual(worker._state, worker.CLOSE) + self.assertNotEqual(worker.blueprint.state, CLOSE) worker.terminate(in_sighandler=True) - self.assertNotEqual(worker._state, worker.CLOSE) + self.assertNotEqual(worker.blueprint.state, CLOSE) finally: worker.pool.signal_safe = sigsafe def test_on_timer_error(self): - worker = WorkController(concurrency=1, loglevel=0) + worker = self.app.WorkController(concurrency=1, loglevel=0) try: raise KeyError('foo') - except KeyError, exc: - Timers(worker).on_timer_error(exc) - msg, args = self.logger.error.call_args[0] + except KeyError as exc: + components.Timer(worker).on_timer_error(exc) + msg, args = self.comp_logger.error.call_args[0] self.assertIn('KeyError', msg % args) def test_on_timer_tick(self): - worker = WorkController(concurrency=1, loglevel=10) + worker = self.app.WorkController(concurrency=1, loglevel=10) - Timers(worker).on_timer_tick(30.0) - xargs = self.logger.debug.call_args[0] + components.Timer(worker).on_timer_tick(30.0) + xargs = self.comp_logger.debug.call_args[0] fmt, arg = xargs[0], xargs[1] self.assertEqual(30.0, arg) self.assertIn('Next eta %s secs', fmt) @@ -879,10 +888,10 @@ class test_WorkController(AppCase): worker = self.worker worker.pool = Mock() backend = Mock() - m = create_message(backend, task=foo_task.name, args=[4, 8, 10], + m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], kwargs={}) - task = Request.from_message(m, m.decode()) - worker.process_task(task) + task = Request(m.decode(), message=m, app=self.app) + worker._process_task(task) self.assertEqual(worker.pool.apply_async.call_count, 1) worker.pool.stop() @@ -891,53 +900,54 @@ class test_WorkController(AppCase): worker.pool = Mock() worker.pool.apply_async.side_effect = KeyboardInterrupt('Ctrl+C') backend = Mock() - m = create_message(backend, task=foo_task.name, args=[4, 8, 10], + m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], kwargs={}) - task = Request.from_message(m, m.decode()) - worker.components = [] - worker._state = worker.RUN + task = Request(m.decode(), message=m, app=self.app) + worker.steps = [] + worker.blueprint.state = RUN with self.assertRaises(KeyboardInterrupt): - worker.process_task(task) - self.assertEqual(worker._state, worker.TERMINATE) + worker._process_task(task) def 
test_process_task_raise_SystemTerminate(self): worker = self.worker worker.pool = Mock() worker.pool.apply_async.side_effect = SystemTerminate() backend = Mock() - m = create_message(backend, task=foo_task.name, args=[4, 8, 10], + m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], kwargs={}) - task = Request.from_message(m, m.decode()) - worker.components = [] - worker._state = worker.RUN + task = Request(m.decode(), message=m, app=self.app) + worker.steps = [] + worker.blueprint.state = RUN with self.assertRaises(SystemExit): - worker.process_task(task) - self.assertEqual(worker._state, worker.TERMINATE) + worker._process_task(task) def test_process_task_raise_regular(self): worker = self.worker worker.pool = Mock() worker.pool.apply_async.side_effect = KeyError('some exception') backend = Mock() - m = create_message(backend, task=foo_task.name, args=[4, 8, 10], + m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], kwargs={}) - task = Request.from_message(m, m.decode()) - worker.process_task(task) + task = Request(m.decode(), message=m, app=self.app) + worker._process_task(task) worker.pool.stop() def test_start_catches_base_exceptions(self): worker1 = self.create_worker() - stc = Mock() + worker1.blueprint.state = RUN + stc = MockStep() stc.start.side_effect = SystemTerminate() - worker1.components = [stc] + worker1.steps = [stc] worker1.start() + stc.start.assert_called_with(worker1) self.assertTrue(stc.terminate.call_count) worker2 = self.create_worker() - sec = Mock() + worker2.blueprint.state = RUN + sec = MockStep() sec.start.side_effect = SystemExit() sec.terminate = None - worker2.components = [sec] + worker2.steps = [sec] worker2.start() self.assertTrue(sec.stop.call_count) @@ -952,45 +962,13 @@ class test_WorkController(AppCase): finally: state.Persistent = Persistent - def test_disable_rate_limits_solo(self): - worker = self.create_worker(disable_rate_limits=True, - pool_cls='solo') - self.assertIsInstance(worker.ready_queue, FastQueue) - self.assertIsNone(worker.mediator) - self.assertEqual(worker.ready_queue.put, worker.process_task) - - def test_enable_rate_limits_eventloop(self): - try: - worker = self.create_worker(disable_rate_limits=False, - use_eventloop=True, - pool_cls='processes') - except ImportError: - raise SkipTest('multiprocessing not supported') - self.assertIsInstance(worker.ready_queue, AsyncTaskBucket) - # XXX disabled until 3.1 - #self.assertFalse(worker.mediator) - #self.assertNotEqual(worker.ready_queue.put, worker.process_task) - - def test_disable_rate_limits_processes(self): - raise SkipTest('disabled until v3.1') - try: - worker = self.create_worker(disable_rate_limits=True, - use_eventloop=False, - pool_cls='processes') - except ImportError: - raise SkipTest('multiprocessing not supported') - self.assertIsInstance(worker.ready_queue, FastQueue) - self.assertFalse(worker.mediator) - self.assertEqual(worker.ready_queue.put, worker.process_task) - def test_process_task_sem(self): worker = self.worker - worker.semaphore = Mock() - worker._quick_acquire = worker.semaphore.acquire + worker._quick_acquire = Mock() req = Mock() - worker.process_task_sem(req) - worker.semaphore.acquire.assert_called_with(worker.process_task, req) + worker._process_task_sem(req) + worker._quick_acquire.assert_called_with(worker._process_task, req) def test_signal_consumer_close(self): worker = self.worker @@ -1004,15 +982,23 @@ class test_WorkController(AppCase): def test_start__stop(self): worker = self.worker - 
worker._shutdown_complete.set() - worker.components = [Mock(), Mock(), Mock(), Mock()] + worker.blueprint.shutdown_complete.set() + worker.steps = [MockStep(StartStopStep(self)) for _ in range(4)] + worker.blueprint.state = RUN + worker.blueprint.started = 4 + for w in worker.steps: + w.start = Mock() + w.close = Mock() + w.stop = Mock() worker.start() - for w in worker.components: + for w in worker.steps: self.assertTrue(w.start.call_count) + worker.consumer = Mock() worker.stop() - for component in worker.components: - self.assertTrue(w.stop.call_count) + for stopstep in worker.steps: + self.assertTrue(stopstep.close.call_count) + self.assertTrue(stopstep.stop.call_count) # Doesn't close pool if no pool. worker.start() @@ -1020,15 +1006,15 @@ class test_WorkController(AppCase): worker.stop() # test that stop of None is not attempted - worker.components[-1] = None + worker.steps[-1] = None worker.start() worker.stop() - def test_component_raises(self): + def test_step_raises(self): worker = self.worker - comp = Mock() - worker.components = [comp] - comp.start.side_effect = TypeError() + step = Mock() + worker.steps = [step] + step.start.side_effect = TypeError() worker.stop = Mock() worker.start() worker.stop.assert_called_with() @@ -1038,85 +1024,67 @@ class test_WorkController(AppCase): def test_start__terminate(self): worker = self.worker - worker._shutdown_complete.set() - worker.components = [Mock(), Mock(), Mock(), Mock(), Mock()] - for component in worker.components[:3]: - component.terminate = None - + worker.blueprint.shutdown_complete.set() + worker.blueprint.started = 5 + worker.blueprint.state = RUN + worker.steps = [MockStep() for _ in range(5)] worker.start() - for w in worker.components[:3]: + for w in worker.steps[:3]: self.assertTrue(w.start.call_count) - self.assertTrue(worker._running, len(worker.components)) - self.assertEqual(worker._state, RUN) + self.assertTrue(worker.blueprint.started, len(worker.steps)) + self.assertEqual(worker.blueprint.state, RUN) worker.terminate() - for component in worker.components[:3]: - self.assertTrue(component.stop.call_count) - self.assertTrue(worker.components[4].terminate.call_count) - - def test_Queues_pool_not_rlimit_safe(self): - w = Mock() - w.pool_cls.rlimit_safe = False - Queues(w).create(w) - self.assertTrue(w.disable_rate_limits) + for step in worker.steps: + self.assertTrue(step.terminate.call_count) def test_Queues_pool_no_sem(self): - raise SkipTest('disabled until v3.1') w = Mock() w.pool_cls.uses_semaphore = False - Queues(w).create(w) - self.assertIs(w.ready_queue.put, w.process_task) + components.Queues(w).create(w) + self.assertIs(w.process_task, w._process_task) - def test_EvLoop_crate(self): + def test_Hub_crate(self): w = Mock() - x = EvLoop(w) - hub = x.create(w) + x = components.Hub(w) + x.create(w) self.assertTrue(w.timer.max_interval) - self.assertIs(w.hub, hub) def test_Pool_crate_threaded(self): w = Mock() w._conninfo.connection_errors = w._conninfo.channel_errors = () w.pool_cls = Mock() w.use_eventloop = False - pool = Pool(w) + pool = components.Pool(w) pool.create(w) def test_Pool_create(self): - from celery.worker.hub import BoundedSemaphore + from kombu.async.semaphore import LaxBoundedSemaphore w = Mock() w._conninfo.connection_errors = w._conninfo.channel_errors = () w.hub = Mock() - w.hub.on_init = [] - w.pool_cls = Mock() - P = w.pool_cls.return_value = Mock() - P._cache = {} - P.timers = {Mock(): 30} + + PoolImp = Mock() + poolimp = PoolImp.return_value = Mock() + poolimp._pool = [Mock(), Mock()] + 
poolimp._cache = {} + poolimp._fileno_to_inq = {} + poolimp._fileno_to_outq = {} + + from celery.concurrency.prefork import TaskPool as _TaskPool + + class MockTaskPool(_TaskPool): + Pool = PoolImp + + @property + def timers(self): + return {Mock(): 30} + + w.pool_cls = MockTaskPool w.use_eventloop = True w.consumer.restart_count = -1 - pool = Pool(w) + pool = components.Pool(w) pool.create(w) - self.assertIsInstance(w.semaphore, BoundedSemaphore) - self.assertTrue(w.hub.on_init) - - hub = Mock() - w.hub.on_init[0](hub) - - cbs = w.pool.init_callbacks.call_args[1] - w = Mock() - cbs['on_process_up'](w) - hub.add_reader.assert_called_with(w.sentinel, P.maintain_pool) - - cbs['on_process_down'](w) - hub.remove.assert_called_with(w.sentinel) - - w.pool._tref_for_id = {} - - result = Mock() - - cbs['on_timeout_cancel'](result) - cbs['on_timeout_cancel'](result) # no more tref - - with self.assertRaises(WorkerLostError): - P.did_start_ok.return_value = False - w.consumer.restart_count = 0 - pool.on_poll_init(P, w, hub) + pool.register_with_event_loop(w, w.hub) + self.assertIsInstance(w.semaphore, LaxBoundedSemaphore) + P = w.pool + P.start() diff --git a/awx/lib/site-packages/celery/utils/__init__.py b/awx/lib/site-packages/celery/utils/__init__.py index 0f360a9f6c..c884028b03 100644 --- a/awx/lib/site-packages/celery/utils/__init__.py +++ b/awx/lib/site-packages/celery/utils/__init__.py @@ -6,36 +6,38 @@ Utility functions. """ -from __future__ import absolute_import -from __future__ import with_statement +from __future__ import absolute_import, print_function import os import sys import traceback import warnings -import types import datetime -from functools import wraps +from functools import partial, wraps from inspect import getargspec from pprint import pprint from kombu.entity import Exchange, Queue from celery.exceptions import CPendingDeprecationWarning, CDeprecationWarning -from .compat import StringIO +from celery.five import StringIO, items, reraise, string_t + +__all__ = ['worker_direct', 'warn_deprecated', 'deprecated', 'lpmerge', + 'is_iterable', 'isatty', 'cry', 'maybe_reraise', 'strtobool', + 'jsonify', 'gen_task_name', 'nodename', 'nodesplit', + 'cached_property'] -from .functional import noop PENDING_DEPRECATION_FMT = """ - %(description)s is scheduled for deprecation in \ - version %(deprecation)s and removal in version v%(removal)s. \ - %(alternative)s + {description} is scheduled for deprecation in \ + version {deprecation} and removal in version v{removal}. \ + {alternative} """ DEPRECATION_FMT = """ - %(description)s is deprecated and scheduled for removal in - version %(removal)s. %(alternative)s + {description} is deprecated and scheduled for removal in + version {removal}. {alternative} """ #: Billiard sets this when execv is enabled. @@ -48,16 +50,26 @@ MP_MAIN_FILE = os.environ.get('MP_MAIN_FILE') or None WORKER_DIRECT_EXCHANGE = Exchange('C.dq') #: Format for worker direct queue names. -WORKER_DIRECT_QUEUE_FORMAT = '%s.dq' +WORKER_DIRECT_QUEUE_FORMAT = '{hostname}.dq' + +#: Separator for worker node name and hostname. +NODENAME_SEP = '@' def worker_direct(hostname): + """Return :class:`kombu.Queue` that is a direct route to + a worker by hostname. + + :param hostname: The fully qualified node name of a worker + (e.g. ``w1@example.com``). If passed a + :class:`kombu.Queue` instance it will simply return + that instead. 
+ """ if isinstance(hostname, Queue): return hostname - return Queue(WORKER_DIRECT_QUEUE_FORMAT % hostname, + return Queue(WORKER_DIRECT_QUEUE_FORMAT.format(hostname=hostname), WORKER_DIRECT_EXCHANGE, - hostname, - auto_delete=True) + hostname, auto_delete=True) def warn_deprecated(description=None, deprecation=None, @@ -66,15 +78,26 @@ def warn_deprecated(description=None, deprecation=None, 'deprecation': deprecation, 'removal': removal, 'alternative': alternative} if deprecation is not None: - w = CPendingDeprecationWarning(PENDING_DEPRECATION_FMT % ctx) + w = CPendingDeprecationWarning(PENDING_DEPRECATION_FMT.format(**ctx)) else: - w = CDeprecationWarning(DEPRECATION_FMT % ctx) + w = CDeprecationWarning(DEPRECATION_FMT.format(**ctx)) warnings.warn(w) def deprecated(description=None, deprecation=None, removal=None, alternative=None): + """Decorator for deprecated functions. + A deprecation warning will be emitted when the function is called. + + :keyword description: Description of what is being deprecated. + :keyword deprecation: Version that marks first deprecation, if this + argument is not set a ``PendingDeprecationWarning`` will be emitted + instead. + :keyword removed: Future version when this feature will be removed. + :keyword alternative: Instructions for an alternative solution (if any). + + """ def _inner(fun): @wraps(fun) @@ -94,7 +117,7 @@ def lpmerge(L, R): Keeps values from `L`, if the value in `R` is :const:`None`.""" set = L.__setitem__ - [set(k, v) for k, v in R.iteritems() if v is not None] + [set(k, v) for k, v in items(R) if v is not None] return L @@ -107,83 +130,56 @@ def is_iterable(obj): def fun_takes_kwargs(fun, kwlist=[]): - """With a function, and a list of keyword arguments, returns arguments - in the list which the function takes. - - If the object has an `argspec` attribute that is used instead - of using the :meth:`inspect.getargspec` introspection. - - :param fun: The function to inspect arguments of. - :param kwlist: The list of keyword arguments. - - Examples - - >>> def foo(self, x, y, logfile=None, loglevel=None): - ... return x * y - >>> fun_takes_kwargs(foo, ['logfile', 'loglevel', 'task_id']) - ['logfile', 'loglevel'] - - >>> def foo(self, x, y, **kwargs): - >>> fun_takes_kwargs(foo, ['logfile', 'loglevel', 'task_id']) - ['logfile', 'loglevel', 'task_id'] - - """ - argspec = getattr(fun, 'argspec', getargspec(fun)) - args, _varargs, keywords, _defaults = argspec - if keywords is not None: + # deprecated + S = getattr(fun, 'argspec', getargspec(fun)) + if S.keywords is not None: return kwlist - return [kw for kw in kwlist if kw in args] + return [kw for kw in kwlist if kw in S.args] def isatty(fh): - # Fixes bug with mod_wsgi: - # mod_wsgi.Log object has no attribute isatty. - return getattr(fh, 'isatty', None) and fh.isatty() + try: + return fh.isatty() + except AttributeError: + pass -def cry(): # pragma: no cover - """Return stacktrace of all active threads. 
- - From https://gist.github.com/737056 - - """ +def cry(out=None, sepchr='=', seplen=49): # pragma: no cover + """Return stacktrace of all active threads, + taken from https://gist.github.com/737056.""" import threading - tmap = {} - main_thread = None + out = StringIO() if out is None else out + P = partial(print, file=out) + # get a map of threads by their ID so we can print their names # during the traceback dump - for t in threading.enumerate(): - if getattr(t, 'ident', None): - tmap[t.ident] = t - else: - main_thread = t + tmap = dict((t.ident, t) for t in threading.enumerate()) - out = StringIO() - sep = '=' * 49 + '\n' - for tid, frame in sys._current_frames().iteritems(): - thread = tmap.get(tid, main_thread) + sep = sepchr * seplen + for tid, frame in items(sys._current_frames()): + thread = tmap.get(tid) if not thread: # skip old junk (left-overs from a fork) continue - out.write('%s\n' % (thread.getName(), )) - out.write(sep) + P('{0.name}'.format(thread)) + P(sep) traceback.print_stack(frame, file=out) - out.write(sep) - out.write('LOCAL VARIABLES\n') - out.write(sep) + P(sep) + P('LOCAL VARIABLES') + P(sep) pprint(frame.f_locals, stream=out) - out.write('\n\n') + P('\n') return out.getvalue() def maybe_reraise(): - """Reraise if an exception is currently being handled, or return + """Re-raise if an exception is currently being handled, or return otherwise.""" exc_info = sys.exc_info() try: if exc_info[2]: - raise exc_info[0], exc_info[1], exc_info[2] + reraise(exc_info[0], exc_info[1], exc_info[2]) finally: # see http://docs.python.org/library/sys.html#sys.exc_info del(exc_info) @@ -192,24 +188,39 @@ def maybe_reraise(): def strtobool(term, table={'false': False, 'no': False, '0': False, 'true': True, 'yes': True, '1': True, 'on': True, 'off': False}): - if isinstance(term, basestring): + """Convert common terms for true/false to bool + (true/false/yes/no/on/off/1/0).""" + if isinstance(term, string_t): try: return table[term.lower()] except KeyError: - raise TypeError('Cannot coerce %r to type bool' % (term, )) + raise TypeError('Cannot coerce {0!r} to type bool'.format(term)) return term -def jsonify(obj): - "Transforms object making it suitable for json serialization" - if isinstance(obj, (int, float, basestring, types.NoneType)): +def jsonify(obj, + builtin_types=(int, float, string_t), key=None, + keyfilter=None, + unknown_type_filter=None): + """Transforms object making it suitable for json serialization""" + from kombu.abstract import Object as KombuDictType + _jsonify = partial(jsonify, builtin_types=builtin_types, key=key, + keyfilter=keyfilter, + unknown_type_filter=unknown_type_filter) + + if isinstance(obj, KombuDictType): + obj = obj.as_dict(recurse=True) + + if obj is None or isinstance(obj, builtin_types): return obj elif isinstance(obj, (tuple, list)): - return [jsonify(v) for v in obj] + return [_jsonify(v) for v in obj] elif isinstance(obj, dict): - return dict((k, jsonify(v)) for k, v in obj.iteritems()) - # See "Date Time String Format" in the ECMA-262 specification. + return dict((k, _jsonify(v, key=k)) + for k, v in items(obj) + if (keyfilter(k) if keyfilter else 1)) elif isinstance(obj, datetime.datetime): + # See "Date Time String Format" in the ECMA-262 specification. 
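The table-driven `strtobool` above accepts the usual spellings; a quick sketch of its behavior (import path assumed from this diff):

    from celery.utils import strtobool

    assert strtobool('on') is True
    assert strtobool('No') is False   # the term is lower-cased before lookup
    assert strtobool(10) == 10        # non-strings pass through unchanged
    strtobool('maybe')                # raises TypeError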
r = obj.isoformat() if obj.microsecond: r = r[:23] + r[26:] @@ -226,10 +237,15 @@ def jsonify(obj): elif isinstance(obj, datetime.timedelta): return str(obj) else: - raise ValueError("Unsupported type: %s" % type(obj)) + if unknown_type_filter is None: + raise ValueError( + 'Unsupported type: {0!r} {1!r} (parent: {2})'.format( + type(obj), obj, key)) + return unknown_type_filter(obj) def gen_task_name(app, name, module_name): + """Generate task name from name/module pair.""" try: module = sys.modules[module_name] except KeyError: @@ -248,6 +264,20 @@ def gen_task_name(app, name, module_name): return '.'.join([app.main, name]) return '.'.join(p for p in (module_name, name) if p) + +def nodename(name, hostname): + """Create node name from name/hostname pair.""" + return NODENAME_SEP.join((name, hostname)) + + +def nodesplit(nodename): + """Split node name into tuple of name/hostname.""" + parts = nodename.split(NODENAME_SEP, 1) + if len(parts) == 1: + return None, parts[0] + return parts + + # ------------------------------------------------------------------------ # # > XXX Compat from .log import LOG_LEVELS # noqa diff --git a/awx/lib/site-packages/celery/utils/compat.py b/awx/lib/site-packages/celery/utils/compat.py index fcbdc5b9fa..6f62964897 100644 --- a/awx/lib/site-packages/celery/utils/compat.py +++ b/awx/lib/site-packages/celery/utils/compat.py @@ -1,172 +1 @@ -# -*- coding: utf-8 -*- -""" - celery.utils.compat - ~~~~~~~~~~~~~~~~~~~ - - Compatibility implementations of features - only available in newer Python versions. - - -""" -from __future__ import absolute_import - -############## py3k ######################################################### -import sys -is_py3k = sys.version_info[0] == 3 - -try: - reload = reload # noqa -except NameError: # pragma: no cover - from imp import reload # noqa - -try: - from UserList import UserList # noqa -except ImportError: # pragma: no cover - from collections import UserList # noqa - -try: - from UserDict import UserDict # noqa -except ImportError: # pragma: no cover - from collections import UserDict # noqa - -if is_py3k: # pragma: no cover - from io import StringIO, BytesIO - from .encoding import bytes_to_str - - class WhateverIO(StringIO): - - def write(self, data): - StringIO.write(self, bytes_to_str(data)) -else: - from StringIO import StringIO # noqa - BytesIO = WhateverIO = StringIO # noqa - - -############## collections.OrderedDict ###################################### -# was moved to kombu -from kombu.utils.compat import OrderedDict # noqa - -############## threading.TIMEOUT_MAX ####################################### -try: - from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX -except ImportError: - THREAD_TIMEOUT_MAX = 1e10 # noqa - -############## itertools.zip_longest ####################################### - -try: - from itertools import izip_longest as zip_longest -except ImportError: # pragma: no cover - import itertools - - def zip_longest(*args, **kwds): # noqa - fillvalue = kwds.get('fillvalue') - - def sentinel(counter=([fillvalue] * (len(args) - 1)).pop): - yield counter() # yields the fillvalue, or raises IndexError - - fillers = itertools.repeat(fillvalue) - iters = [itertools.chain(it, sentinel(), fillers) - for it in args] - try: - for tup in itertools.izip(*iters): - yield tup - except IndexError: - pass - - -############## itertools.chain.from_iterable ################################ -from itertools import chain - - -def _compat_chain_from_iterable(iterables): # pragma: no cover - for it in iterables: - for 
element in it: - yield element - -try: - chain_from_iterable = getattr(chain, 'from_iterable') -except AttributeError: # pragma: no cover - chain_from_iterable = _compat_chain_from_iterable - - -############## logging.handlers.WatchedFileHandler ########################## -import logging -import os -from stat import ST_DEV, ST_INO -import platform as _platform - -if _platform.system() == 'Windows': # pragma: no cover - #since windows doesn't go with WatchedFileHandler use FileHandler instead - WatchedFileHandler = logging.FileHandler -else: - try: - from logging.handlers import WatchedFileHandler - except ImportError: # pragma: no cover - class WatchedFileHandler(logging.FileHandler): # noqa - """ - A handler for logging to a file, which watches the file - to see if it has changed while in use. This can happen because of - usage of programs such as newsyslog and logrotate which perform - log file rotation. This handler, intended for use under Unix, - watches the file to see if it has changed since the last emit. - (A file has changed if its device or inode have changed.) - If it has changed, the old file stream is closed, and the file - opened to get a new stream. - - This handler is not appropriate for use under Windows, because - under Windows open files cannot be moved or renamed - logging - opens the files with exclusive locks - and so there is no need - for such a handler. Furthermore, ST_INO is not supported under - Windows; stat always returns zero for this value. - - This handler is based on a suggestion and patch by Chad J. - Schroeder. - """ - def __init__(self, *args, **kwargs): - logging.FileHandler.__init__(self, *args, **kwargs) - - if not os.path.exists(self.baseFilename): - self.dev, self.ino = -1, -1 - else: - stat = os.stat(self.baseFilename) - self.dev, self.ino = stat[ST_DEV], stat[ST_INO] - - def emit(self, record): - """ - Emit a record. - - First check if the underlying file has changed, and if it - has, close the old stream and reopen the file to get the - current stream. - """ - if not os.path.exists(self.baseFilename): - stat = None - changed = 1 - else: - stat = os.stat(self.baseFilename) - changed = ((stat[ST_DEV] != self.dev) or - (stat[ST_INO] != self.ino)) - if changed and self.stream is not None: - self.stream.flush() - self.stream.close() - self.stream = self._open() - if stat is None: - stat = os.stat(self.baseFilename) - self.dev, self.ino = stat[ST_DEV], stat[ST_INO] - logging.FileHandler.emit(self, record) - - -############## format(int, ',d') ########################## - -if sys.version_info >= (2, 7): # pragma: no cover - def format_d(i): - return format(i, ',d') -else: # pragma: no cover - def format_d(i): # noqa - s = '%d' % i - groups = [] - while s and s[-1].isdigit(): - groups.append(s[-3:]) - s = s[:-3] - return s + ','.join(reversed(groups)) +from celery.five import * # noqa diff --git a/awx/lib/site-packages/celery/utils/debug.py b/awx/lib/site-packages/celery/utils/debug.py index 54c988877f..cdbc3e9c1c 100644 --- a/awx/lib/site-packages/celery/utils/debug.py +++ b/awx/lib/site-packages/celery/utils/debug.py @@ -6,31 +6,87 @@ Utilities for debugging memory usage. 
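The removed `format_d` fallback only mattered on pre-2.7 interpreters; on any Python this code now runs on, the built-in thousands separator it emulated is available directly:

    assert format(1234567, ',d') == '1,234,567'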
""" -from __future__ import absolute_import +from __future__ import absolute_import, print_function import os -from .compat import format_d +from contextlib import contextmanager +from functools import partial + +from celery.five import range +from celery.platforms import signals try: from psutil import Process except ImportError: Process = None # noqa +__all__ = [ + 'blockdetection', 'sample_mem', 'memdump', 'sample', + 'humanbytes', 'mem_rss', 'ps', +] + +UNITS = ( + (2 ** 40.0, 'TB'), + (2 ** 30.0, 'GB'), + (2 ** 20.0, 'MB'), + (2 ** 10.0, 'kB'), + (0.0, '{0!d}b'), +) + _process = None _mem_sample = [] +def _on_blocking(signum, frame): + import inspect + raise RuntimeError( + 'Blocking detection timed-out at: %s' % ( + inspect.getframeinfo(frame), )) + + +@contextmanager +def blockdetection(timeout): + """A timeout context using ``SIGALRM`` that can be used to detect blocking + functions.""" + if not timeout: + yield + else: + old_handler = signals['ALRM'] + old_handler = None if old_handler == _on_blocking else old_handler + + signals['ALRM'] = _on_blocking + + try: + yield signals.arm_alarm(timeout) + finally: + if old_handler: + signals['ALRM'] = old_handler + signals.reset_alarm() + + def sample_mem(): """Sample RSS memory usage. Statistics can then be output by calling :func:`memdump`. """ - _mem_sample.append(mem_rss()) + current_rss = mem_rss() + _mem_sample.append(current_rss) + return current_rss -def memdump(samples=10): +def _memdump(samples=10): + S = _mem_sample + prev = list(S) if len(S) <= samples else sample(S, samples) + _mem_sample[:] = [] + import gc + gc.collect() + after_collect = mem_rss() + return prev, after_collect + + +def memdump(samples=10, file=None): """Dump memory statistics. Will print a sample of all RSS memory samples added by @@ -38,17 +94,16 @@ def memdump(samples=10): used RSS memory after :func:`gc.collect`. """ + say = partial(print, file=file) if ps() is None: - print('- rss: (psutil not installed).') + say('- rss: (psutil not installed).') return - if any(_mem_sample): - print('- rss (sample):') - for mem in sample(_mem_sample, samples): - print('- > %s,' % mem) - _mem_sample[:] = [] - import gc - gc.collect() - print('- rss (end): %s.' % (mem_rss())) + prev, after_collect = _memdump(samples) + if prev: + say('- rss (sample):') + for mem in prev: + say('- > {0},'.format(mem)) + say('- rss (end): {0}.'.format(after_collect)) def sample(x, n, k=0): @@ -61,20 +116,41 @@ def sample(x, n, k=0): """ j = len(x) // n - for _ in xrange(n): - yield x[k] + for _ in range(n): + try: + yield x[k] + except IndexError: + break k += j +def hfloat(f, p=5): + """Convert float to value suitable for humans. + + :keyword p: Float precision. + + """ + i = int(f) + return i if i == f else '{0:.{p}}'.format(f, p=p) + + +def humanbytes(s): + """Convert bytes to human-readable form (e.g. 
kB, MB).""" + return next( + '{0}{1}'.format(hfloat(s / div if div else s), unit) + for div, unit in UNITS if s >= div + ) + + def mem_rss(): - """Returns RSS memory usage as a humanized string.""" + """Return RSS memory usage as a humanized string.""" p = ps() if p is not None: - return '%sMB' % (format_d(p.get_memory_info().rss // 1024), ) + return humanbytes(p.get_memory_info().rss) def ps(): - """Returns the global :class:`psutil.Process` instance, + """Return the global :class:`psutil.Process` instance, or :const:`None` if :mod:`psutil` is not installed.""" global _process if _process is None and Process is not None: diff --git a/awx/lib/site-packages/celery/utils/dispatch/__init__.py b/awx/lib/site-packages/celery/utils/dispatch/__init__.py index 888fe6d8af..b6e8d0b23b 100644 --- a/awx/lib/site-packages/celery/utils/dispatch/__init__.py +++ b/awx/lib/site-packages/celery/utils/dispatch/__init__.py @@ -1,4 +1,6 @@ # -*- coding: utf-8 -*- from __future__ import absolute_import -from .signal import Signal # noqa +from .signal import Signal + +__all__ = ['Signal'] diff --git a/awx/lib/site-packages/celery/utils/dispatch/saferef.py b/awx/lib/site-packages/celery/utils/dispatch/saferef.py index 7aa2fd0e6b..d11fdc3084 100644 --- a/awx/lib/site-packages/celery/utils/dispatch/saferef.py +++ b/awx/lib/site-packages/celery/utils/dispatch/saferef.py @@ -10,6 +10,10 @@ from __future__ import absolute_import import weakref import traceback +from collections import Callable + +__all__ = ['safe_ref'] + def safe_ref(target, on_delete=None): # pragma: no cover """Return a *safe* weak reference to a callable target @@ -23,15 +27,15 @@ def safe_ref(target, on_delete=None): # pragma: no cover goes out of scope with the reference object, (either a :class:`weakref.ref` or a :class:`BoundMethodWeakref`) as argument. """ - if getattr(target, "im_self", None) is not None: + if getattr(target, '__self__', None) is not None: # Turn a bound method into a BoundMethodWeakref instance. # Keep track of these instances for lookup by disconnect(). - assert hasattr(target, 'im_func'), \ - """safe_ref target %r has im_self, but no im_func, " \ - "don't know how to create reference""" % (target, ) + assert hasattr(target, '__func__'), \ + """safe_ref target {0!r} has __self__, but no __func__: \ + don't know how to create reference""".format(target) return get_bound_method_weakref(target=target, on_delete=on_delete) - if callable(on_delete): + if isinstance(on_delete, Callable): return weakref.ref(target, on_delete) else: return weakref.ref(target) @@ -66,7 +70,7 @@ class BoundMethodWeakref(object): # pragma: no cover weak reference to the target object - .. attribute:: weak_func + .. attribute:: weak_fun weak reference to the target function @@ -112,10 +116,10 @@ class BoundMethodWeakref(object): # pragma: no cover """Return a weak-reference-like instance for a bound method :param target: the instance-method target for the weak - reference, must have `im_self` and `im_func` attributes + reference, must have `__self__` and `__func__` attributes and be reconstructable via:: - target.im_func.__get__(target.im_self) + target.__func__.__get__(target.__self__) which is true of built-in instance methods. 
@@ -136,21 +140,21 @@ class BoundMethodWeakref(object): # pragma: no cover pass for function in methods: try: - if callable(function): + if isinstance(function, Callable): function(self) - except Exception, exc: + except Exception as exc: try: traceback.print_exc() except AttributeError: - print("Exception during saferef %s cleanup function " - "%s: %s" % (self, function, exc)) + print('Exception during saferef {0} cleanup function ' + '{1}: {2}'.format(self, function, exc)) self.deletion_methods = [on_delete] self.key = self.calculate_key(target) - self.weak_self = weakref.ref(target.im_self, remove) - self.weak_func = weakref.ref(target.im_func, remove) - self.self_name = str(target.im_self) - self.func_name = str(target.im_func.__name__) + self.weak_self = weakref.ref(target.__self__, remove) + self.weak_fun = weakref.ref(target.__func__, remove) + self.self_name = str(target.__self__) + self.fun_name = str(target.__func__.__name__) def calculate_key(cls, target): """Calculate the reference key for this reference @@ -158,22 +162,23 @@ class BoundMethodWeakref(object): # pragma: no cover Currently this is a two-tuple of the `id()`'s of the target object and the target function respectively. """ - return id(target.im_self), id(target.im_func) + return id(target.__self__), id(target.__func__) calculate_key = classmethod(calculate_key) def __str__(self): """Give a friendly representation of the object""" - return """%s( %s.%s )""" % ( - self.__class__.__name__, + return '{0}( {1}.{2} )'.format( + type(self).__name__, self.self_name, - self.func_name, + self.fun_name, ) __repr__ = __str__ - def __nonzero__(self): + def __bool__(self): """Whether we are still a valid reference""" return self() is not None + __nonzero__ = __bool__ # py2 def __cmp__(self, other): """Compare with another reference""" @@ -185,7 +190,7 @@ class BoundMethodWeakref(object): # pragma: no cover """Return a strong reference to the bound method If the target cannot be retrieved, then will - return None, otherwise returns a bound instance + return None, otherwise return a bound instance method for our object and function. Note: @@ -194,7 +199,7 @@ class BoundMethodWeakref(object): # pragma: no cover """ target = self.weak_self() if target is not None: - function = self.weak_func() + function = self.weak_fun() if function is not None: return function.__get__(target) @@ -212,7 +217,7 @@ class BoundNonDescriptorMethodWeakref(BoundMethodWeakref): # pragma: no cover ... pass >>> def foo(self): - ... return "foo" + ... return 'foo' >>> A.bar = foo But this shouldn't be a common use case. So, on platforms where methods @@ -224,10 +229,10 @@ class BoundNonDescriptorMethodWeakref(BoundMethodWeakref): # pragma: no cover """Return a weak-reference-like instance for a bound method :param target: the instance-method target for the weak - reference, must have `im_self` and `im_func` attributes + reference, must have `__self__` and `__func__` attributes and be reconstructable via:: - target.im_func.__get__(target.im_self) + target.__func__.__get__(target.__self__) which is true of built-in instance methods. @@ -238,9 +243,7 @@ class BoundNonDescriptorMethodWeakref(BoundMethodWeakref): # pragma: no cover which will be passed a pointer to this object. 
""" - assert getattr(target.im_self, target.__name__) == target, \ - "method %s isn't available as the attribute %s of %s" % ( - target, target.__name__, target.im_self) + assert getattr(target.__self__, target.__name__) == target super(BoundNonDescriptorMethodWeakref, self).__init__(target, on_delete) @@ -248,7 +251,7 @@ class BoundNonDescriptorMethodWeakref(BoundMethodWeakref): # pragma: no cover """Return a strong reference to the bound method If the target cannot be retrieved, then will - return None, otherwise returns a bound instance + return None, otherwise return a bound instance method for our object and function. Note: @@ -258,7 +261,7 @@ class BoundNonDescriptorMethodWeakref(BoundMethodWeakref): # pragma: no cover """ target = self.weak_self() if target is not None: - function = self.weak_func() + function = self.weak_fun() if function is not None: # Using curry() would be another option, but it erases the # "signature" of the function. That is, after a function is diff --git a/awx/lib/site-packages/celery/utils/dispatch/signal.py b/awx/lib/site-packages/celery/utils/dispatch/signal.py index 1bee956f30..fe81e0bcce 100644 --- a/awx/lib/site-packages/celery/utils/dispatch/signal.py +++ b/awx/lib/site-packages/celery/utils/dispatch/signal.py @@ -3,14 +3,18 @@ from __future__ import absolute_import import weakref +from collections import Callable from . import saferef +from celery.five import range + +__all__ = ['Signal'] WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref) def _make_id(target): # pragma: no cover - if hasattr(target, 'im_func'): - return (id(target.im_self), id(target.im_func)) + if hasattr(target, '__func__'): + return (id(target.__self__), id(target.__func__)) return id(target) @@ -91,7 +95,7 @@ class Signal(object): # pragma: no cover return _connect_signal - if args and callable(args[0]): + if args and isinstance(args[0], Callable): return _handle_options(*args[1:], **kwargs)(args[0]) return _handle_options(*args, **kwargs) @@ -118,7 +122,7 @@ class Signal(object): # pragma: no cover else: lookup_key = (_make_id(receiver), _make_id(sender)) - for index in xrange(len(self.receivers)): + for index in range(len(self.receivers)): (r_key, _) = self.receivers[index] if r_key == lookup_key: del self.receivers[index] @@ -136,7 +140,7 @@ class Signal(object): # pragma: no cover :keyword \*\*named: Named arguments which will be passed to receivers. - :returns: a list of tuple pairs: `[(receiver, response), ... ]`. + :returns: a list of tuple pairs: `[(receiver, response), … ]`. """ responses = [] @@ -159,7 +163,7 @@ class Signal(object): # pragma: no cover These arguments must be a subset of the argument names defined in :attr:`providing_args`. - :returns: a list of tuple pairs: `[(receiver, response), ... ]`. + :returns: a list of tuple pairs: `[(receiver, response), … ]`. :raises DispatcherKeyError: @@ -173,11 +177,11 @@ class Signal(object): # pragma: no cover return responses # Call each receiver with whatever arguments it can accept. - # Return a list of tuple pairs [(receiver, response), ... ]. + # Return a list of tuple pairs [(receiver, response), … ]. 
         for receiver in self._live_receivers(_make_id(sender)):
             try:
                 response = receiver(signal=self, sender=sender, **named)
-            except Exception, err:
+            except Exception as err:
                 responses.append((receiver, err))
             else:
                 responses.append((receiver, response))
@@ -217,6 +221,6 @@ class Signal(object):  # pragma: no cover
                 del self.receivers[idx]
 
     def __repr__(self):
-        return '<Signal: %s>' % (self.__class__.__name__, )
+        return '<Signal: {0}>'.format(type(self).__name__)
 
     __str__ = __repr__
diff --git a/awx/lib/site-packages/celery/utils/functional.py b/awx/lib/site-packages/celery/utils/functional.py
index 011f32343a..d2d597194c 100644
--- a/awx/lib/site-packages/celery/utils/functional.py
+++ b/awx/lib/site-packages/celery/utils/functional.py
@@ -7,18 +7,22 @@
 
 """
 from __future__ import absolute_import
-from __future__ import with_statement
 
+import sys
 import threading
 
 from functools import wraps
 from itertools import islice
 
 from kombu.utils import cached_property
-from kombu.utils.functional import promise, maybe_promise
-from kombu.utils.compat import OrderedDict, next
+from kombu.utils.functional import lazy, maybe_evaluate, is_list, maybe_list
+from kombu.utils.compat import OrderedDict
 
-from .compat import UserDict, UserList
+from celery.five import UserDict, UserList, items, keys
+
+__all__ = ['LRUCache', 'is_list', 'maybe_list', 'memoize', 'mlazy', 'noop',
+           'first', 'firstmethod', 'chunks', 'padlist', 'mattrgetter', 'uniq',
+           'regen', 'dictfilter', 'lazy', 'maybe_evaluate']
 
 KEYWORD_MARK = object()
 
@@ -43,16 +47,6 @@ class LRUCache(UserDict):
                 value = self[key] = self.data.pop(key)
             return value
 
-    def keys(self):
-        # userdict.keys in py3k calls __getitem__
-        return self.data.keys()
-
-    def values(self):
-        return list(self._iterate_values())
-
-    def items(self):
-        return list(self._iterate_items())
-
     def update(self, *args, **kwargs):
         with self.mutex:
             data, limit = self.data, self.limit
@@ -67,7 +61,7 @@ class LRUCache(UserDict):
         # remove least recently used key.
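A short sketch of the cache in use via `memoize` (import path per this diff; the memoized function is hypothetical):

    from celery.utils.functional import memoize

    @memoize(maxsize=2)
    def add(x, y):
        return x + y

    add(1, 2)   # computed and cached
    add(1, 2)   # served from the LRUCache; oldest keys evicted past maxsize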
         with self.mutex:
             if self.limit and len(self.data) >= self.limit:
-                self.data.pop(iter(self.data).next())
+                self.data.pop(next(iter(self.data)))
             self.data[key] = value
 
     def __iter__(self):
@@ -89,6 +83,11 @@ class LRUCache(UserDict):
             pass
     itervalues = _iterate_values
 
+    def _iterate_keys(self):
+        # userdict.keys in py3k calls __getitem__
+        return keys(self.data)
+    iterkeys = _iterate_keys
+
     def incr(self, key, delta=1):
         with self.mutex:
             # this acts as memcached does- store as a string, but return a
@@ -106,15 +105,20 @@ class LRUCache(UserDict):
         self.__dict__ = state
         self.mutex = threading.RLock()
 
+    if sys.version_info[0] == 3:  # pragma: no cover
+        keys = _iterate_keys
+        values = _iterate_values
+        items = _iterate_items
+    else:  # noqa
 
-def is_list(l):
-    """Returns true if object is list-like, but not a dict or string."""
-    return hasattr(l, '__iter__') and not isinstance(l, (dict, basestring))
+        def keys(self):
+            return list(self._iterate_keys())
 
+        def values(self):
+            return list(self._iterate_values())
 
-def maybe_list(l):
-    """Returns list of one element if ``l`` is a scalar."""
-    return l if l is None or is_list(l) else [l]
+        def items(self):
+            return list(self._iterate_items())
 
 
 def memoize(maxsize=None, Cache=LRUCache):
@@ -125,7 +129,7 @@ def memoize(maxsize=None, Cache=LRUCache):
 
         @wraps(fun)
         def _M(*args, **kwargs):
-            key = args + (KEYWORD_MARK, ) + tuple(sorted(kwargs.iteritems()))
+            key = args + (KEYWORD_MARK, ) + tuple(sorted(kwargs.items()))
             try:
                 with mutex:
                     value = cache[key]
@@ -151,15 +155,15 @@ def memoize(maxsize=None, Cache=LRUCache):
     return _memoize
 
 
-class mpromise(promise):
-    """Memoized promise.
+class mlazy(lazy):
+    """Memoized lazy evaluation.
 
     The function is only evaluated once, every subsequent access
     will return the same value.
 
     .. attribute:: evaluated
 
-        Set to to :const:`True` after the promise has been evaluated.
+        Set to :const:`True` after the object has been evaluated.
 
     """
     evaluated = False
@@ -167,7 +171,7 @@

     def evaluate(self):
         if not self.evaluated:
-            self._value = super(mpromise, self).evaluate()
+            self._value = super(mlazy, self).evaluate()
             self.evaluated = True
         return self._value
 
@@ -182,7 +186,7 @@ def noop(*args, **kwargs):
 
 
 def first(predicate, it):
-    """Returns the first element in `iterable` that `predicate` returns a
+    """Return the first element in `iterable` that `predicate` gives a
     :const:`True` value for.
 
     If `predicate` is None it will return the first item that is not None.
@@ -195,17 +199,18 @@ def first(predicate, it):
 
 
 def firstmethod(method):
-    """Returns a function that with a list of instances,
-    finds the first instance that returns a value for the given method.
+    """Return a function that, with a list of instances,
+    finds the first instance that gives a value for the given method.
 
-    The list can also contain promises (:class:`promise`.)
+    The list can also contain lazy instances
+    (:class:`~kombu.utils.functional.lazy`.)
 
     """
     def _matcher(it, *args, **kwargs):
         for obj in it:
             try:
-                answer = getattr(maybe_promise(obj), method)(*args, **kwargs)
+                answer = getattr(maybe_evaluate(obj), method)(*args, **kwargs)
             except AttributeError:
                 pass
             else:
                 return answer
@@ -245,8 +250,9 @@ def padlist(container, size, default=None):
         ('George', 'Costanza', 'NYC')
         >>> first, last, city = padlist(['George', 'Costanza'], 3)
         ('George', 'Costanza', None)
-        >>> first, last, city, planet = padlist(['George', 'Costanza',
-            'NYC'], 4, default='Earth')
+        >>> first, last, city, planet = padlist(
+        ...     ['George', 'Costanza', 'NYC'], 4, default='Earth',
+        ...
) ('George', 'Costanza', 'NYC', 'Earth') """ @@ -254,14 +260,14 @@ def padlist(container, size, default=None): def mattrgetter(*attrs): - """Like :func:`operator.itemgetter` but returns :const:`None` on missing + """Like :func:`operator.itemgetter` but return :const:`None` on missing attributes instead of raising :exc:`AttributeError`.""" return lambda obj: dict((attr, getattr(obj, attr, None)) for attr in attrs) def uniq(it): - """Returns all unique elements in ``it``, preserving order.""" + """Return all unique elements in ``it``, preserving order.""" seen = set() return (seen.add(obj) or obj for obj in it if obj not in seen) @@ -287,5 +293,8 @@ class _regen(UserList, list): def data(self): return list(self.__it) - def __iter__(self): # needed for Python 2.5 - return iter(self.data) + +def dictfilter(d=None, **kw): + """Remove all keys from dict ``d`` whose value is :const:`None`""" + d = kw if d is None else (dict(d, **kw) if kw else d) + return dict((k, v) for k, v in items(d) if v is not None) diff --git a/awx/lib/site-packages/celery/utils/imports.py b/awx/lib/site-packages/celery/utils/imports.py index e46462663e..22a2fdcd31 100644 --- a/awx/lib/site-packages/celery/utils/imports.py +++ b/awx/lib/site-packages/celery/utils/imports.py @@ -7,7 +7,6 @@ """ from __future__ import absolute_import -from __future__ import with_statement import imp as _imp import importlib @@ -18,15 +17,19 @@ from contextlib import contextmanager from kombu.utils import symbol_by_name -from .compat import reload +from celery.five import reload + +__all__ = [ + 'NotAPackage', 'qualname', 'instantiate', 'symbol_by_name', 'cwd_in_path', + 'find_module', 'import_from_cwd', 'reload_from_cwd', 'module_file', +] class NotAPackage(Exception): pass -if sys.version_info >= (3, 3): # pragma: no cover - +if sys.version_info > (3, 3): # pragma: no cover def qualname(obj): if not hasattr(obj, '__name__') and hasattr(obj, '__class__'): obj = obj.__class__ @@ -35,10 +38,9 @@ if sys.version_info >= (3, 3): # pragma: no cover q = '.'.join((obj.__module__, q)) return q else: - def qualname(obj): # noqa if not hasattr(obj, '__name__') and hasattr(obj, '__class__'): - return qualname(obj.__class__) + obj = obj.__class__ return '.'.join((obj.__module__, obj.__name__)) @@ -107,5 +109,6 @@ def reload_from_cwd(module, reloader=None): def module_file(module): + """Return the correct original file name of a module.""" name = module.__file__ return name[:-1] if name.endswith('.pyc') else name diff --git a/awx/lib/site-packages/celery/utils/iso8601.py b/awx/lib/site-packages/celery/utils/iso8601.py new file mode 100644 index 0000000000..d9de247091 --- /dev/null +++ b/awx/lib/site-packages/celery/utils/iso8601.py @@ -0,0 +1,76 @@ +""" +Originally taken from pyiso8601 (http://code.google.com/p/pyiso8601/) + +Modified to match the behavior of dateutil.parser: + + - raise ValueError instead of ParseError + - return naive datetimes by default + - uses pytz.FixedOffset + +This is the original License: + +Copyright (c) 2007 Michael Twomey + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be 
included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""
+from __future__ import absolute_import
+
+import re
+
+from datetime import datetime
+from pytz import FixedOffset
+
+__all__ = ['parse_iso8601']
+
+# Adapted from http://delete.me.uk/2005/03/iso8601.html
+ISO8601_REGEX = re.compile(
+    r'(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})'
+    r'((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})'
+    '(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?'
+    r'(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?'
+)
+TIMEZONE_REGEX = re.compile(
+    '(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})'
+)
+
+
+def parse_iso8601(datestring):
+    """Parse and convert ISO 8601 string into a datetime object"""
+    m = ISO8601_REGEX.match(datestring)
+    if not m:
+        raise ValueError('unable to parse date string %r' % datestring)
+    groups = m.groupdict()
+    tz = groups['timezone']
+    if tz and tz != 'Z':
+        m = TIMEZONE_REGEX.match(tz)
+        prefix, hours, minutes = m.groups()
+        hours, minutes = int(hours), int(minutes)
+        if prefix == '-':
+            hours = -hours
+            minutes = -minutes
+        tz = FixedOffset(minutes + hours * 60)
+    frac = groups['fraction']
+    groups['fraction'] = int(float('0.%s' % frac) * 1e6) if frac else 0
+    return datetime(
+        int(groups['year']), int(groups['month']), int(groups['day']),
+        int(groups['hour']), int(groups['minute']), int(groups['second']),
+        int(groups['fraction']), tz
+    )
diff --git a/awx/lib/site-packages/celery/utils/log.py b/awx/lib/site-packages/celery/utils/log.py
index efe2239ba3..80b25476d9 100644
--- a/awx/lib/site-packages/celery/utils/log.py
+++ b/awx/lib/site-packages/celery/utils/log.py
@@ -6,7 +6,7 @@
     Logging utilities.
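The new ISO 8601 parser above behaves roughly like this, assuming `pytz` is installed (the module imports it unconditionally):

    from celery.utils.iso8601 import parse_iso8601

    parse_iso8601('2013-11-14T22:55:03')         # naive datetime
    parse_iso8601('2013-11-14T22:55:03-05:00')   # aware, pytz.FixedOffset(-300)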
""" -from __future__ import absolute_import +from __future__ import absolute_import, print_function import logging import os @@ -14,14 +14,23 @@ import sys import threading import traceback +from contextlib import contextmanager from billiard import current_process, util as mputil +from kombu.five import values from kombu.log import get_logger as _get_logger, LOG_LEVELS +from kombu.utils.encoding import safe_str + +from celery.five import string_t, text_t -from .encoding import safe_str, str_t from .term import colored +__all__ = ['ColorFormatter', 'LoggingProxy', 'base_logger', + 'set_in_sighandler', 'in_sighandler', 'get_logger', + 'get_task_logger', 'mlevel', 'ensure_process_aware_logger', + 'get_multiprocessing_logger', 'reset_multiprocessing_logger'] + _process_aware = False -is_py3k = sys.version_info[0] == 3 +PY3 = sys.version_info[0] == 3 MP_LOG = os.environ.get('MP_LOG', False) @@ -34,25 +43,68 @@ MP_LOG = os.environ.get('MP_LOG', False) base_logger = logger = _get_logger('celery') mp_logger = _get_logger('multiprocessing') -in_sighandler = False +_in_sighandler = False def set_in_sighandler(value): - global in_sighandler - in_sighandler = value + global _in_sighandler + _in_sighandler = value + + +def iter_open_logger_fds(): + seen = set() + loggers = (list(values(logging.Logger.manager.loggerDict)) + + [logging.getLogger(None)]) + for logger in loggers: + try: + for handler in logger.handlers: + try: + if handler not in seen: + yield handler.stream + seen.add(handler) + except AttributeError: + pass + except AttributeError: # PlaceHolder does not have handlers + pass + + +@contextmanager +def in_sighandler(): + set_in_sighandler(True) + try: + yield + finally: + set_in_sighandler(False) + + +def logger_isa(l, p): + this, seen = l, set() + while this: + if this == p: + return True + else: + if this in seen: + raise RuntimeError( + 'Logger {0!r} parents recursive'.format(l), + ) + seen.add(this) + this = this.parent + return False def get_logger(name): l = _get_logger(name) if logging.root not in (l, l.parent) and l is not base_logger: - l.parent = base_logger + if not logger_isa(l, base_logger): + l.parent = base_logger return l task_logger = get_logger('celery.task') +worker_logger = get_logger('celery.worker') def get_task_logger(name): logger = get_logger(name) - if logger.parent is logging.root: + if not logger_isa(logger, task_logger): logger.parent = task_logger return logger @@ -77,37 +129,35 @@ class ColorFormatter(logging.Formatter): if ei and not isinstance(ei, tuple): ei = sys.exc_info() r = logging.Formatter.formatException(self, ei) - if isinstance(r, str) and not is_py3k: + if isinstance(r, str) and not PY3: return safe_str(r) return r def format(self, record): - levelname = record.levelname - color = self.colors.get(levelname) + sformat = logging.Formatter.format + color = self.colors.get(record.levelname) - if self.use_color and color: + if color and self.use_color: + msg = record.msg try: - msg = record.msg # safe_str will repr the color object # and color will break on non-string objects # so need to reorder calls based on type. 
             # Issue #427
-            if isinstance(msg, basestring):
-                record.msg = str_t(color(safe_str(msg)))
-            else:
-                record.msg = safe_str(color(msg))
-        except Exception, exc:
-            record.msg = '<Unrepresentable %r: %r>' % (
-                type(record.msg), exc)
+                try:
+                    if isinstance(msg, string_t):
+                        record.msg = text_t(color(safe_str(msg)))
+                    else:
+                        record.msg = safe_str(color(msg))
+                except UnicodeDecodeError:
+                    record.msg = safe_str(msg)  # skip colors
+            except Exception as exc:
+                record.msg = '<Unrepresentable {0!r}: {1!r}>'.format(
+                    type(msg), exc)
                 record.exc_info = True
-
-        if not is_py3k and 'processName' not in record.__dict__:
-            # Very ugly, but have to make sure processName is supported
-            # by foreign logger instances.
-            # (processName is always supported by Python 2.7)
-            process_name = current_process and current_process()._name or ''
-            record.__dict__['processName'] = process_name
-        return safe_str(logging.Formatter.format(self, record))
+            return sformat(self, record)
+        else:
+            return safe_str(sformat(self, record))
 
 
 class LoggingProxy(object):
@@ -151,13 +201,12 @@ class LoggingProxy(object):
                     del(exc_info)
 
             handler.handleError = WithSafeHandleError().handleError
-
         return [wrap_handler(h) for h in self.logger.handlers]
 
     def write(self, data):
         """Write message to logging object."""
-        if in_sighandler:
-            return sys.__stderr__.write(safe_str(data))
+        if _in_sighandler:
+            return print(safe_str(data), file=sys.__stderr__)
         if getattr(self._thread, 'recurse_protection', False):
             # Logger is logging back to this file, so stop recursing.
             return
@@ -192,7 +241,7 @@ class LoggingProxy(object):
         self.closed = True
 
     def isatty(self):
-        """Always returns :const:`False`. Just here for file support."""
+        """Always return :const:`False`. Just here for file support."""
         return False
 
@@ -240,8 +289,7 @@ def _patch_logger_class():
             _signal_safe = True
 
             def log(self, *args, **kwargs):
-                if in_sighandler:
-                    sys.__stderr__.write('CANNOT LOG IN SIGHANDLER')
+                if _in_sighandler:
                     return
                 return OldLoggerClass.log(self, *args, **kwargs)
         logging.setLoggerClass(SigSafeLogger)
diff --git a/awx/lib/site-packages/celery/utils/mail.py b/awx/lib/site-packages/celery/utils/mail.py
index 7fd7af6a47..00c5f29a9d 100644
--- a/awx/lib/site-packages/celery/utils/mail.py
+++ b/awx/lib/site-packages/celery/utils/mail.py
@@ -8,7 +8,6 @@
 """
 from __future__ import absolute_import
 
-import sys
 import smtplib
 import socket
 import traceback
@@ -17,9 +16,14 @@ import warnings
 from email.mime.text import MIMEText
 
 from .functional import maybe_list
-from .imports import symbol_by_name
 
-supports_timeout = sys.version_info >= (2, 6)
+try:
+    from ssl import SSLError
+except ImportError:  # pragma: no cover
+    class SSLError(Exception):  # noqa
+        """fallback used when ssl module not compiled."""
+
+__all__ = ['SendmailWarning', 'Message', 'Mailer', 'ErrorMail']
 
 _local_hostname = None
 
@@ -46,7 +50,7 @@ class Message(object):
         self.charset = charset
 
     def __repr__(self):
-        return '<E-mail: To:%r Subject:%r>' % (self.to, self.subject)
+        return '<Message: {0.to} {0.subject}>'.format(self)
 
     def __str__(self):
         msg = MIMEText(self.body, 'plain', self.charset)
@@ -57,7 +61,6 @@ class Message(object):
 
 
 class Mailer(object):
-    supports_timeout = supports_timeout
 
     def __init__(self, host='localhost', port=0, user=None, password=None,
                  timeout=2, use_ssl=False, use_tls=False):
@@ -69,30 +72,21 @@ class Mailer(object):
         self.use_ssl = use_ssl
         self.use_tls = use_tls
 
-    def send(self, message, fail_silently=False):
+    def send(self, message, fail_silently=False, **kwargs):
         try:
-            if self.supports_timeout:
-                self._send(message, timeout=self.timeout)
-            else:
-                import socket
-                old_timeout =
socket.getdefaulttimeout() - socket.setdefaulttimeout(self.timeout) - try: - self._send(message) - finally: - socket.setdefaulttimeout(old_timeout) - except Exception, exc: + self._send(message, **kwargs) + except Exception as exc: if not fail_silently: raise warnings.warn(SendmailWarning( - 'Mail could not be sent: %r %r\n%r' % ( + 'Mail could not be sent: {0!r} {1!r}\n{2!r}'.format( exc, {'To': ', '.join(message.to), 'Subject': message.subject}, traceback.format_stack()))) def _send(self, message, **kwargs): Client = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP - client = Client(self.host, self.port, + client = Client(self.host, self.port, timeout=self.timeout, local_hostname=get_local_hostname(), **kwargs) if self.use_tls: @@ -106,7 +100,7 @@ class Mailer(object): client.sendmail(message.sender, message.to, str(message)) try: client.quit() - except socket.sslerror: + except SSLError: client.close() @@ -144,7 +138,7 @@ class ErrorMail(object): * hostname - Worker hostname. + Worker nodename. """ @@ -154,45 +148,40 @@ class ErrorMail(object): #: Format string used to generate error email subjects. subject = """\ - [celery@%(hostname)s] Error: Task %(name)s (%(id)s): %(exc)s + [{hostname}] Error: Task {name} ({id}): {exc!r} """ #: Format string used to generate error email content. body = """ -Task %%(name)s with id %%(id)s raised exception:\n%%(exc)r +Task {{name}} with id {{id}} raised exception:\n{{exc!r}} -Task was called with args: %%(args)s kwargs: %%(kwargs)s. +Task was called with args: {{args}} kwargs: {{kwargs}}. The contents of the full traceback was: -%%(traceback)s +{{traceback}} -%(EMAIL_SIGNATURE_SEP)s +{EMAIL_SIGNATURE_SEP} Just to let you know, -py-celery at %%(hostname)s. -""" % {'EMAIL_SIGNATURE_SEP': EMAIL_SIGNATURE_SEP} - - error_whitelist = None +py-celery at {{hostname}}. +""".format(EMAIL_SIGNATURE_SEP=EMAIL_SIGNATURE_SEP) def __init__(self, task, **kwargs): self.task = task - self.email_subject = kwargs.get('subject', self.subject) - self.email_body = kwargs.get('body', self.body) - self.error_whitelist = getattr(task, 'error_whitelist', None) or () + self.subject = kwargs.get('subject', self.subject) + self.body = kwargs.get('body', self.body) def should_send(self, context, exc): - """Returns true or false depending on if a task error mail + """Return true or false depending on if a task error mail should be sent for this type of error.""" - return not self.error_whitelist or isinstance( - exc, tuple(symbol_by_name(n) for n in self.error_whitelist), - ) + return True def format_subject(self, context): - return self.subject.strip() % context + return self.subject.strip().format(**context) def format_body(self, context): - return self.body.strip() % context + return self.body.strip().format(**context) def send(self, context, exc, fail_silently=True): if self.should_send(context, exc): diff --git a/awx/lib/site-packages/celery/utils/objects.py b/awx/lib/site-packages/celery/utils/objects.py new file mode 100644 index 0000000000..b2ad646b36 --- /dev/null +++ b/awx/lib/site-packages/celery/utils/objects.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +""" + celery.utils.objects + ~~~~~~~~~~~~~~~~~~~~ + + Object related utilities including introspection, etc. + +""" +from __future__ import absolute_import + +__all__ = ['mro_lookup'] + + +def mro_lookup(cls, attr, stop=(), monkey_patched=[]): + """Return the first node by MRO order that defines an attribute. + + :keyword stop: A list of types that if reached will stop the search. 
+ :keyword monkey_patched: Use one of the stop classes if the attr's + module origin is not in this list, this to detect monkey patched + attributes. + + :returns None: if the attribute was not found. + + """ + for node in cls.mro(): + if node in stop: + try: + attr = node.__dict__[attr] + module_origin = attr.__module__ + except (AttributeError, KeyError): + pass + else: + if module_origin not in monkey_patched: + return node + return + if attr in node.__dict__: + return node diff --git a/awx/lib/site-packages/celery/utils/serialization.py b/awx/lib/site-packages/celery/utils/serialization.py index 0cce7e971a..d5509f1c20 100644 --- a/awx/lib/site-packages/celery/utils/serialization.py +++ b/awx/lib/site-packages/celery/utils/serialization.py @@ -8,46 +8,30 @@ """ from __future__ import absolute_import -import inspect -import sys -import types +from inspect import getmro +from itertools import takewhile -import pickle as pypickle try: - import cPickle as cpickle + import cPickle as pickle except ImportError: - cpickle = None # noqa + import pickle # noqa from .encoding import safe_repr - -if sys.version_info < (2, 6): # pragma: no cover - # cPickle is broken in Python <= 2.6. - # It unsafely and incorrectly uses relative instead of absolute imports, - # so e.g.: - # exceptions.KeyError - # becomes: - # celery.exceptions.KeyError - # - # Your best choice is to upgrade to Python 2.6, - # as while the pure pickle version has worse performance, - # it is the only safe option for older Python versions. - pickle = pypickle -else: - pickle = cpickle or pypickle +__all__ = ['UnpickleableExceptionWrapper', 'subclass_exception', + 'find_pickleable_exception', 'create_exception_cls', + 'get_pickleable_exception', 'get_pickleable_etype', + 'get_pickled_exception'] #: List of base classes we probably don't want to reduce to. -unwanted_base_classes = (StandardError, Exception, BaseException, object) +try: + unwanted_base_classes = (StandardError, Exception, BaseException, object) +except NameError: # pragma: no cover + unwanted_base_classes = (Exception, BaseException, object) # py3k -if sys.version_info < (2, 5): # pragma: no cover - # Prior to Python 2.5, Exception was an old-style class - def subclass_exception(name, parent, unused): - return types.ClassType(name, (parent,), {}) -else: - - def subclass_exception(name, parent, module): # noqa - return type(name, (parent,), {'__module__': module}) +def subclass_exception(name, parent, module): # noqa + return type(name, (parent, ), {'__module__': module}) def find_pickleable_exception(exc, loads=pickle.loads, @@ -60,8 +44,9 @@ def find_pickleable_exception(exc, loads=pickle.loads, :param exc: An exception instance. - :returns: the nearest exception if it's not :exc:`Exception` or below, - if it is it returns :const:`None`. + Will return the nearest pickleable parent exception class + (except :exc:`Exception` and parents), or if the exception is + pickleable it will return :const:`None`. :rtype :exc:`Exception`: @@ -79,22 +64,7 @@ find_nearest_pickleable_exception = find_pickleable_exception # XXX compat def itermro(cls, stop): - getmro_ = getattr(cls, 'mro', None) - - # old-style classes doesn't have mro() - if not getmro_: # pragma: no cover - # all Py2.4 exceptions has a baseclass. - if not getattr(cls, '__bases__', ()): - return - # Use inspect.getmro() to traverse bases instead. 
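`mro_lookup` from the new objects module walks the MRO the same way the old `itermro` helper did; a sketch with hypothetical classes:

    from celery.utils.objects import mro_lookup

    class A(object):
        pass

    class B(A):
        def f(self):
            pass

    class C(B):
        pass

    assert mro_lookup(C, 'f', stop=(object, )) is B   # first definer wins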
- getmro_ = lambda: inspect.getmro(cls) - - for supercls in getmro_(): - if supercls in stop: - # only BaseException and object, from here on down, - # we don't care about these. - return - yield supercls + return takewhile(lambda sup: sup not in stop, getmro(cls)) def create_exception_cls(name, module, parent=None): @@ -115,13 +85,16 @@ class UnpickleableExceptionWrapper(Exception): .. code-block:: python - >>> try: - ... something_raising_unpickleable_exc() - >>> except Exception, e: - ... exc = UnpickleableException(e.__class__.__module__, - ... e.__class__.__name__, - ... e.args) - ... pickle.dumps(exc) # Works fine. + >>> def pickle_it(raising_function): + ... try: + ... raising_function() + ... except Exception as e: + ... exc = UnpickleableExceptionWrapper( + ... e.__class__.__module__, + ... e.__class__.__name__, + ... e.args, + ... ) + ... pickle.dumps(exc) # Works fine. """ @@ -182,6 +155,8 @@ def get_pickleable_etype(cls, loads=pickle.loads, dumps=pickle.dumps): loads(dumps(cls)) except: return Exception + else: + return cls def get_pickled_exception(exc): diff --git a/awx/lib/site-packages/celery/utils/sysinfo.py b/awx/lib/site-packages/celery/utils/sysinfo.py new file mode 100644 index 0000000000..65073a6f9d --- /dev/null +++ b/awx/lib/site-packages/celery/utils/sysinfo.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +import os + +from math import ceil + +from kombu.utils import cached_property + +__all__ = ['load_average', 'df'] + + +if hasattr(os, 'getloadavg'): + + def load_average(): + return tuple(ceil(l * 1e2) / 1e2 for l in os.getloadavg()) + +else: # pragma: no cover + # Windows doesn't have getloadavg + def load_average(): # noqa + return (0.0, 0.0, 0.0) + + +class df(object): + + def __init__(self, path): + self.path = path + + @property + def total_blocks(self): + return self.stat.f_blocks * self.stat.f_frsize / 1024 + + @property + def available(self): + return self.stat.f_bavail * self.stat.f_frsize / 1024 + + @property + def capacity(self): + avail = self.stat.f_bavail + used = self.stat.f_blocks - self.stat.f_bfree + return int(ceil(used * 100.0 / (used + avail) + 0.5)) + + @cached_property + def stat(self): + return os.statvfs(os.path.abspath(self.path)) diff --git a/awx/lib/site-packages/celery/utils/term.py b/awx/lib/site-packages/celery/utils/term.py index 7207c6aad3..cec2ab25f6 100644 --- a/awx/lib/site-packages/celery/utils/term.py +++ b/awx/lib/site-packages/celery/utils/term.py @@ -6,11 +6,16 @@ Terminals and colors. 
""" -from __future__ import absolute_import +from __future__ import absolute_import, unicode_literals import platform -from .encoding import safe_str +from functools import reduce + +from kombu.utils.encoding import safe_str +from celery.five import string + +__all__ = ['colored'] BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) OP_SEQ = '\033[%dm' @@ -18,8 +23,7 @@ RESET_SEQ = '\033[0m' COLOR_SEQ = '\033[1;%dm' fg = lambda s: COLOR_SEQ % s -SYSTEM = platform.system() -IS_WINDOWS = SYSTEM == 'Windows' +IS_WINDOWS = platform.system() == 'Windows' class colored(object): @@ -48,35 +52,36 @@ class colored(object): 'white': self.white} def _add(self, a, b): - return unicode(a) + unicode(b) + return string(a) + string(b) def _fold_no_color(self, a, b): try: A = a.no_color() except AttributeError: - A = unicode(a) + A = string(a) try: B = b.no_color() except AttributeError: - B = unicode(b) - return safe_str(A) + safe_str(B) + B = string(b) + + return safe_str(safe_str(A) + safe_str(B)) def no_color(self): if self.s: - return reduce(self._fold_no_color, self.s) + return safe_str(reduce(self._fold_no_color, self.s)) return '' def embed(self): prefix = '' if self.enabled: prefix = self.op - return prefix + safe_str(reduce(self._add, self.s)) + return safe_str(prefix) + safe_str(reduce(self._add, self.s)) def __unicode__(self): suffix = '' if self.enabled: suffix = RESET_SEQ - return safe_str(self.embed() + suffix) + return safe_str(self.embed() + safe_str(suffix)) def __str__(self): return safe_str(self.__unicode__()) @@ -151,4 +156,4 @@ class colored(object): return self.node(s or [''], RESET_SEQ) def __add__(self, other): - return unicode(self) + unicode(other) + return string(self) + string(other) diff --git a/awx/lib/site-packages/celery/utils/text.py b/awx/lib/site-packages/celery/utils/text.py index fe52f914f7..ffd2d72fa1 100644 --- a/awx/lib/site-packages/celery/utils/text.py +++ b/awx/lib/site-packages/celery/utils/text.py @@ -8,10 +8,14 @@ """ from __future__ import absolute_import -import textwrap +from textwrap import fill from pprint import pformat +__all__ = ['dedent_initial', 'dedent', 'fill_paragraphs', 'join', + 'ensure_2lines', 'abbr', 'abbrtask', 'indent', 'truncate', + 'pluralize', 'pretty'] + def dedent_initial(s, n=4): return s[n:] if s[:n] == ' ' * n else s @@ -22,7 +26,7 @@ def dedent(s, n=4, sep='\n'): def fill_paragraphs(s, width, sep='\n'): - return sep.join(textwrap.fill(p, width) for p in s.split(sep)) + return sep.join(fill(p, width) for p in s.split(sep)) def join(l, sep='\n'): @@ -73,8 +77,10 @@ def pluralize(n, text, suffix='s'): def pretty(value, width=80, nl_width=80, sep='\n', **kw): if isinstance(value, dict): - return '{%s %s' % (sep, pformat(value, 4, nl_width)[1:]) + return '{{{0} {1}'.format(sep, pformat(value, 4, nl_width)[1:]) elif isinstance(value, tuple): - return '%s%s%s' % (sep, ' ' * 4, pformat(value, width=nl_width, **kw)) + return '{0}{1}{2}'.format( + sep, ' ' * 4, pformat(value, width=nl_width, **kw), + ) else: return pformat(value, width=width, **kw) diff --git a/awx/lib/site-packages/celery/utils/threads.py b/awx/lib/site-packages/celery/utils/threads.py index 9826363491..5d42373295 100644 --- a/awx/lib/site-packages/celery/utils/threads.py +++ b/awx/lib/site-packages/celery/utils/threads.py @@ -6,46 +6,40 @@ Threading utilities. 
""" -from __future__ import absolute_import +from __future__ import absolute_import, print_function import os +import socket import sys import threading import traceback +from contextlib import contextmanager + from celery.local import Proxy -from celery.utils.compat import THREAD_TIMEOUT_MAX +from celery.five import THREAD_TIMEOUT_MAX, items + +__all__ = ['bgThread', 'Local', 'LocalStack', 'LocalManager', + 'get_ident', 'default_socket_timeout'] USE_FAST_LOCALS = os.environ.get('USE_FAST_LOCALS') PY3 = sys.version_info[0] == 3 -NEW_EVENT = (sys.version_info[0] == 3) and (sys.version_info[1] >= 3) - -_Thread = threading.Thread -_Event = threading.Event if NEW_EVENT else threading._Event -active_count = (getattr(threading, 'active_count', None) or - threading.activeCount) -if sys.version_info < (2, 6): - - class Event(_Event): # pragma: no cover - is_set = _Event.isSet - - class Thread(_Thread): # pragma: no cover - is_alive = _Thread.isAlive - daemon = property(_Thread.isDaemon, _Thread.setDaemon) - name = property(_Thread.getName, _Thread.setName) -else: - Event = _Event - Thread = _Thread +@contextmanager +def default_socket_timeout(timeout): + prev = socket.getdefaulttimeout() + socket.setdefaulttimeout(timeout) + yield + socket.setdefaulttimeout(prev) -class bgThread(Thread): +class bgThread(threading.Thread): def __init__(self, name=None, **kwargs): super(bgThread, self).__init__() - self._is_shutdown = Event() - self._is_stopped = Event() + self._is_shutdown = threading.Event() + self._is_stopped = threading.Event() self.daemon = True self.name = name or self.__class__.__name__ @@ -53,7 +47,7 @@ class bgThread(Thread): raise NotImplementedError('subclass responsibility') def on_crash(self, msg, *fmt, **kwargs): - sys.stderr.write((msg + '\n') % fmt) + print(msg.format(*fmt), file=sys.stderr) exc_info = sys.exc_info() try: traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], @@ -68,9 +62,9 @@ class bgThread(Thread): while not shutdown_set(): try: body() - except Exception, exc: + except Exception as exc: try: - self.on_crash('%r crashed: %r', self.name, exc) + self.on_crash('{0!r} crashed: {1!r}', self.name, exc) self._set_stopped() finally: os._exit(1) # exiting by normal means won't work @@ -96,12 +90,15 @@ try: from greenlet import getcurrent as get_ident except ImportError: # pragma: no cover try: - from thread import get_ident # noqa - except ImportError: # pragma: no cover + from _thread import get_ident # noqa + except ImportError: try: - from dummy_thread import get_ident # noqa + from thread import get_ident # noqa except ImportError: # pragma: no cover - from _thread import get_ident # noqa + try: + from _dummy_thread import get_ident # noqa + except ImportError: + from dummy_thread import get_ident # noqa def release_local(local): @@ -135,7 +132,7 @@ class Local(object): object.__setattr__(self, '__ident_func__', get_ident) def __iter__(self): - return iter(self.__storage__.items()) + return iter(items(self.__storage__)) def __call__(self, proxy): """Create a proxy for a name.""" @@ -186,7 +183,7 @@ class _LocalStack(object): item from the stack after using. When the stack is empty it will no longer be bound to the current context (and as such released). - By calling the stack without arguments it returns a proxy that + By calling the stack without arguments it will return a proxy that resolves to the topmost item on the stack. 
""" @@ -222,7 +219,7 @@ class _LocalStack(object): return rv def pop(self): - """Removes the topmost item from the stack, will return the + """Remove the topmost item from the stack, will return the old value or `None` if the stack was already empty. """ stack = getattr(self._local, 'stack', None) @@ -322,7 +319,7 @@ class _FastLocalStack(threading.local): def __len__(self): return len(self.stack) -if USE_FAST_LOCALS: +if USE_FAST_LOCALS: # pragma: no cover LocalStack = _FastLocalStack else: # - See #706 diff --git a/awx/lib/site-packages/celery/utils/timer2.py b/awx/lib/site-packages/celery/utils/timer2.py index 37be017fb2..d462c65748 100644 --- a/awx/lib/site-packages/celery/utils/timer2.py +++ b/awx/lib/site-packages/celery/utils/timer2.py @@ -7,221 +7,20 @@ """ from __future__ import absolute_import -from __future__ import with_statement -import atexit -import heapq import os import sys import threading -from datetime import datetime -from functools import wraps from itertools import count -from time import time, sleep -from weakref import proxy as weakrefproxy +from time import sleep -from celery.utils.compat import THREAD_TIMEOUT_MAX -from celery.utils.timeutils import timedelta_seconds, timezone -from kombu.log import get_logger +from celery.five import THREAD_TIMEOUT_MAX +from kombu.async.timer import Entry, Timer as Schedule, to_timestamp, logger -VERSION = (1, 0, 0) -__version__ = '.'.join(str(p) for p in VERSION) -__author__ = 'Ask Solem' -__contact__ = 'ask@celeryproject.org' -__homepage__ = 'http://github.com/ask/timer2/' -__docformat__ = 'restructuredtext' - -DEFAULT_MAX_INTERVAL = 2 TIMER_DEBUG = os.environ.get('TIMER_DEBUG') -EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=timezone.utc) -IS_PYPY = hasattr(sys, 'pypy_version_info') -logger = get_logger('timer2') - - -class Entry(object): - if not IS_PYPY: - __slots__ = ( - 'fun', 'args', 'kwargs', 'tref', 'cancelled', - '_last_run', '__weakref__', - ) - - def __init__(self, fun, args=None, kwargs=None): - self.fun = fun - self.args = args or [] - self.kwargs = kwargs or {} - self.tref = weakrefproxy(self) - self._last_run = None - self.cancelled = False - - def __call__(self): - return self.fun(*self.args, **self.kwargs) - - def cancel(self): - try: - self.tref.cancelled = True - except ReferenceError: - pass - - def __repr__(self): - return ' hash(other) - - def __eq__(self, other): - return hash(self) == hash(other) - - -def to_timestamp(d, default_timezone=timezone.utc): - if isinstance(d, datetime): - if d.tzinfo is None: - d = d.replace(tzinfo=default_timezone) - return timedelta_seconds(d - EPOCH) - return d - - -class Schedule(object): - """ETA scheduler.""" - Entry = Entry - - on_error = None - - def __init__(self, max_interval=None, on_error=None, **kwargs): - self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL) - self.on_error = on_error or self.on_error - self._queue = [] - - def apply_entry(self, entry): - try: - entry() - except Exception, exc: - if not self.handle_error(exc): - logger.error('Error in timer: %r', exc, exc_info=True) - - def handle_error(self, exc_info): - if self.on_error: - self.on_error(exc_info) - return True - - def stop(self): - pass - - def enter(self, entry, eta=None, priority=0): - """Enter function into the scheduler. - - :param entry: Item to enter. - :keyword eta: Scheduled time as a :class:`datetime.datetime` object. - :keyword priority: Unused. 
- - """ - if eta is None: - eta = time() - if isinstance(eta, datetime): - try: - eta = to_timestamp(eta) - except Exception, exc: - if not self.handle_error(exc): - raise - return - return self._enter(eta, priority, entry) - - def _enter(self, eta, priority, entry): - heapq.heappush(self._queue, (eta, priority, entry)) - return entry - - def apply_at(self, eta, fun, args=(), kwargs={}, priority=0): - return self.enter(self.Entry(fun, args, kwargs), eta, priority) - - def enter_after(self, msecs, entry, priority=0, time=time): - return self.enter(entry, time() + (msecs / 1000.0), priority) - - def apply_after(self, msecs, fun, args=(), kwargs={}, priority=0): - return self.enter_after(msecs, self.Entry(fun, args, kwargs), priority) - - def apply_interval(self, msecs, fun, args=(), kwargs={}, priority=0): - tref = self.Entry(fun, args, kwargs) - secs = msecs * 1000.0 - - @wraps(fun) - def _reschedules(*args, **kwargs): - last, now = tref._last_run, time() - lsince = (now - tref._last_run) * 1000.0 if last else msecs - try: - if lsince and lsince >= msecs: - tref._last_run = now - return fun(*args, **kwargs) - finally: - if not tref.cancelled: - last = tref._last_run - next = secs - (now - last) if last else secs - self.enter_after(next / 1000.0, tref, priority) - - tref.fun = _reschedules - tref._last_run = None - return self.enter_after(msecs, tref, priority) - - @property - def schedule(self): - return self - - def __iter__(self, min=min, nowfun=time, pop=heapq.heappop, - push=heapq.heappush): - """The iterator yields the time to sleep for between runs.""" - max_interval = self.max_interval - queue = self._queue - - while 1: - if queue: - eta, priority, entry = verify = queue[0] - now = nowfun() - - if now < eta: - yield min(eta - now, max_interval), None - else: - event = pop(queue) - - if event is verify: - if not entry.cancelled: - yield None, entry - continue - else: - push(queue, event) - else: - yield None, None - - def empty(self): - """Is the schedule empty?""" - return not self._queue - - def clear(self): - self._queue[:] = [] # used because we can't replace the object - # and the operation is atomic. 
- - def info(self): - return ({'eta': eta, 'priority': priority, 'item': item} - for eta, priority, item in self.queue) - - def cancel(self, tref): - tref.cancel() - - @property - def queue(self, _pop=heapq.heappop): - """Snapshot of underlying datastructure.""" - events = list(self._queue) - return [_pop(i) for i in [events] * len(events)] +__all__ = ['Entry', 'Schedule', 'Timer', 'to_timestamp'] class Timer(threading.Thread): @@ -230,7 +29,7 @@ class Timer(threading.Thread): running = False on_tick = None - _timer_count = count(1).next + _timer_count = count(1) if TIMER_DEBUG: # pragma: no cover def start(self, *args, **kwargs): @@ -240,22 +39,22 @@ class Timer(threading.Thread): super(Timer, self).start(*args, **kwargs) def __init__(self, schedule=None, on_error=None, on_tick=None, - max_interval=None, **kwargs): + on_start=None, max_interval=None, **kwargs): self.schedule = schedule or self.Schedule(on_error=on_error, max_interval=max_interval) + self.on_start = on_start self.on_tick = on_tick or self.on_tick - threading.Thread.__init__(self) self._is_shutdown = threading.Event() self._is_stopped = threading.Event() self.mutex = threading.Lock() self.not_empty = threading.Condition(self.mutex) - self.setDaemon(True) - self.setName('Timer-%s' % (self._timer_count(), )) + self.daemon = True + self.name = 'Timer-{0}'.format(next(self._timer_count)) def _next_entry(self): with self.not_empty: - delay, entry = self.scheduler.next() + delay, entry = next(self.scheduler) if entry is None: if delay is None: self.not_empty.wait(1.0) @@ -282,7 +81,7 @@ class Timer(threading.Thread): # we lost the race at interpreter shutdown, # so gc collected built-in modules. pass - except Exception, exc: + except Exception as exc: logger.error('Thread Timer crashed: %r', exc, exc_info=True) os._exit(1) @@ -295,6 +94,8 @@ class Timer(threading.Thread): def ensure_started(self): if not self.running and not self.isAlive(): + if self.on_start: + self.on_start(self) self.start() def _do_enter(self, meth, *args, **kwargs): @@ -305,22 +106,22 @@ class Timer(threading.Thread): return entry def enter(self, entry, eta, priority=None): - return self._do_enter('enter', entry, eta, priority=priority) + return self._do_enter('enter_at', entry, eta, priority=priority) - def apply_at(self, *args, **kwargs): - return self._do_enter('apply_at', *args, **kwargs) + def call_at(self, *args, **kwargs): + return self._do_enter('call_at', *args, **kwargs) def enter_after(self, *args, **kwargs): return self._do_enter('enter_after', *args, **kwargs) - def apply_after(self, *args, **kwargs): - return self._do_enter('apply_after', *args, **kwargs) + def call_after(self, *args, **kwargs): + return self._do_enter('call_after', *args, **kwargs) - def apply_interval(self, *args, **kwargs): - return self._do_enter('apply_interval', *args, **kwargs) + def call_repeatedly(self, *args, **kwargs): + return self._do_enter('call_repeatedly', *args, **kwargs) - def exit_after(self, msecs, priority=10): - self.apply_after(msecs, sys.exit, priority) + def exit_after(self, secs, priority=10): + self.call_after(secs, sys.exit, priority) def cancel(self, tref): tref.cancel() @@ -329,20 +130,15 @@ class Timer(threading.Thread): self.schedule.clear() def empty(self): - return self.schedule.empty() + return not len(self) + + def __len__(self): + return len(self.schedule) + + def __bool__(self): + return True + __nonzero__ = __bool__ @property def queue(self): return self.schedule.queue - -default_timer = _default_timer = Timer() -apply_after = 
_default_timer.apply_after -apply_at = _default_timer.apply_at -apply_interval = _default_timer.apply_interval -enter_after = _default_timer.enter_after -enter = _default_timer.enter -exit_after = _default_timer.exit_after -cancel = _default_timer.cancel -clear = _default_timer.clear - -atexit.register(_default_timer.stop) diff --git a/awx/lib/site-packages/celery/utils/timeutils.py b/awx/lib/site-packages/celery/utils/timeutils.py index 67f105e996..adfac3bc37 100644 --- a/awx/lib/site-packages/celery/utils/timeutils.py +++ b/awx/lib/site-packages/celery/utils/timeutils.py @@ -11,55 +11,52 @@ from __future__ import absolute_import import os import time as _time -from kombu.utils import cached_property +from calendar import monthrange +from datetime import date, datetime, timedelta, tzinfo -from datetime import datetime, timedelta, tzinfo -from dateutil import tz -from dateutil.parser import parse as parse_iso8601 +from kombu.utils import cached_property, reprcall +from kombu.utils.compat import timedelta_seconds -from celery.exceptions import ImproperlyConfigured +from pytz import timezone as _timezone, AmbiguousTimeError +from celery.five import string_t + +from .functional import dictfilter +from .iso8601 import parse_iso8601 from .text import pluralize -try: - import pytz - from pytz import AmbiguousTimeError -except ImportError: # pragma: no cover - pytz = None # noqa - - class AmbiguousTimeError(Exception): # noqa - pass - +__all__ = ['LocalTimezone', 'timezone', 'maybe_timedelta', 'timedelta_seconds', + 'delta_resolution', 'remaining', 'rate', 'weekday', + 'humanize_seconds', 'maybe_iso8601', 'is_naive', 'make_aware', + 'localize', 'to_utc', 'maybe_make_aware', 'ffwd', 'utcoffset', + 'adjust_timestamp', 'maybe_s_to_ms'] C_REMDEBUG = os.environ.get('C_REMDEBUG', False) - DAYNAMES = 'sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat' -WEEKDAYS = dict((name, dow) for name, dow in zip(DAYNAMES, range(7))) +WEEKDAYS = dict(zip(DAYNAMES, range(7))) RATE_MODIFIER_MAP = {'s': lambda n: n, 'm': lambda n: n / 60.0, 'h': lambda n: n / 60.0 / 60.0} - -HAVE_TIMEDELTA_TOTAL_SECONDS = hasattr(timedelta, 'total_seconds') - -TIME_UNITS = (('day', 60 * 60 * 24.0, lambda n: '%.2f' % n), - ('hour', 60 * 60.0, lambda n: '%.2f' % n), - ('minute', 60.0, lambda n: '%.2f' % n), - ('second', 1.0, lambda n: '%.2f' % n)) +TIME_UNITS = (('day', 60 * 60 * 24.0, lambda n: format(n, '.2f')), + ('hour', 60 * 60.0, lambda n: format(n, '.2f')), + ('minute', 60.0, lambda n: format(n, '.2f')), + ('second', 1.0, lambda n: format(n, '.2f'))) ZERO = timedelta(0) _local_timezone = None +__timezone__ = -_time.timezone +__altzone__ = -_time.altzone + class LocalTimezone(tzinfo): - """ - Local time implementation taken from Python's docs. + """Local time implementation taken from Python's docs. - Used only when pytz isn't available, and most likely inaccurate. If you're - having trouble with this class, don't waste your time, just install pytz. + Used only when UTC is not enabled. 
""" def __init__(self): @@ -74,7 +71,7 @@ class LocalTimezone(tzinfo): tzinfo.__init__(self) def __repr__(self): - return "" + return '' def utcoffset(self, dt): if self._isdst(dt): @@ -115,19 +112,14 @@ class _Zone(object): def to_system(self, dt): return localize(dt, self.local) - def to_local_fallback(self, dt, *args, **kwargs): + def to_local_fallback(self, dt): if is_naive(dt): return make_aware(dt, self.local) return localize(dt, self.local) def get_timezone(self, zone): - if isinstance(zone, basestring): - if pytz is None: - if zone == 'UTC': - return tz.gettz('UTC') - raise ImproperlyConfigured( - 'Timezones requires the pytz library') - return pytz.timezone(zone) + if isinstance(zone, string_t): + return _timezone(zone) return zone @cached_property @@ -147,29 +139,6 @@ def maybe_timedelta(delta): return delta -if HAVE_TIMEDELTA_TOTAL_SECONDS: # pragma: no cover - - def timedelta_seconds(delta): - """Convert :class:`datetime.timedelta` to seconds. - - Doesn't account for negative values. - - """ - return max(delta.total_seconds(), 0) - -else: # pragma: no cover - - def timedelta_seconds(delta): # noqa - """Convert :class:`datetime.timedelta` to seconds. - - Doesn't account for negative values. - - """ - if delta.days < 0: - return 0 - return delta.days * 86400 + delta.seconds + (delta.microseconds / 10e5) - - def delta_resolution(dt, delta): """Round a datetime to the resolution of a timedelta. @@ -188,11 +157,11 @@ def delta_resolution(dt, delta): args = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second for res, predicate in resolutions: if predicate(delta) >= 1.0: - return datetime(*args[:res]) + return datetime(*args[:res], tzinfo=dt.tzinfo) return dt -def remaining(start, ends_in, now=None, relative=False, debug=False): +def remaining(start, ends_in, now=None, relative=False): """Calculate the remaining time for a start date and a timedelta. e.g. "how many seconds left for 30 seconds after start?" @@ -211,17 +180,17 @@ def remaining(start, ends_in, now=None, relative=False, debug=False): if relative: end_date = delta_resolution(end_date, ends_in) ret = end_date - now - if C_REMDEBUG: - print('rem: NOW:%s START:%s END_DATE:%s REM:%s' % ( - now, start, end_date, ret)) + if C_REMDEBUG: # pragma: no cover + print('rem: NOW:%r START:%r ENDS_IN:%r END_DATE:%s REM:%s' % ( + now, start, ends_in, end_date, ret)) return ret def rate(rate): - """Parses rate strings, such as `"100/m"`, `"2/h"` or `"0.5/s"` - and converts them to seconds.""" + """Parse rate strings, such as `"100/m"`, `"2/h"` or `"0.5/s"` + and convert them to seconds.""" if rate: - if isinstance(rate, basestring): + if isinstance(rate, string_t): ops, _, modifier = rate.partition('/') return RATE_MODIFIER_MAP[modifier or 's'](float(ops)) or 0 return rate or 0 @@ -245,7 +214,7 @@ def weekday(name): raise KeyError(name) -def humanize_seconds(secs, prefix='', sep=''): +def humanize_seconds(secs, prefix='', sep='', now='now'): """Show seconds in human form, e.g. 60 is "1 minute", 7200 is "2 hours". 
@@ -257,9 +226,9 @@ def humanize_seconds(secs, prefix='', sep=''): for unit, divider, formatter in TIME_UNITS: if secs >= divider: w = secs / divider - return '%s%s%s %s' % (prefix, sep, formatter(w), - pluralize(w, unit)) - return 'now' + return '{0}{1}{2} {3}'.format(prefix, sep, formatter(w), + pluralize(w, unit)) + return now def maybe_iso8601(dt): @@ -272,7 +241,7 @@ def maybe_iso8601(dt): def is_naive(dt): - """Returns :const:`True` if the datetime is naive + """Return :const:`True` if the datetime is naive (does not have timezone information).""" return dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None @@ -317,5 +286,62 @@ def to_utc(dt): def maybe_make_aware(dt, tz=None): if is_naive(dt): dt = to_utc(dt) - return localize(dt, - timezone.utc if tz is None else timezone.tz_or_local(tz)) + return localize( + dt, timezone.utc if tz is None else timezone.tz_or_local(tz), + ) + + +class ffwd(object): + """Version of relativedelta that only supports addition.""" + + def __init__(self, year=None, month=None, weeks=0, weekday=None, day=None, + hour=None, minute=None, second=None, microsecond=None, + **kwargs): + self.year = year + self.month = month + self.weeks = weeks + self.weekday = weekday + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + self.days = weeks * 7 + self._has_time = self.hour is not None or self.minute is not None + + def __repr__(self): + return reprcall('ffwd', (), self._fields(weeks=self.weeks, + weekday=self.weekday)) + + def __radd__(self, other): + if not isinstance(other, date): + return NotImplemented + year = self.year or other.year + month = self.month or other.month + day = min(monthrange(year, month)[1], self.day or other.day) + ret = other.replace(**dict(dictfilter(self._fields()), + year=year, month=month, day=day)) + if self.weekday is not None: + ret += timedelta(days=(7 - ret.weekday() + self.weekday) % 7) + return ret + timedelta(days=self.days) + + def _fields(self, **extra): + return dictfilter({ + 'year': self.year, 'month': self.month, 'day': self.day, + 'hour': self.hour, 'minute': self.minute, + 'second': self.second, 'microsecond': self.microsecond, + }, **extra) + + +def utcoffset(): + if _time.daylight: + return (__timezone__ + __altzone__) // 3600 + return __timezone__ // 3600 + + +def adjust_timestamp(ts, offset, here=utcoffset): + return ts - (offset - here()) * 3600 + + +def maybe_s_to_ms(v): + return int(float(v) * 1000.0) if v is not None else v diff --git a/awx/lib/site-packages/celery/worker/__init__.py b/awx/lib/site-packages/celery/worker/__init__.py index 04f7bcd2e4..f34f986b01 100644 --- a/awx/lib/site-packages/celery/worker/__init__.py +++ b/awx/lib/site-packages/celery/worker/__init__.py @@ -5,421 +5,222 @@ :class:`WorkController` can be used to instantiate in-process workers. - The worker consists of several components, all managed by boot-steps - (mod:`celery.worker.bootsteps`). + The worker consists of several components, all managed by bootsteps + (mod:`celery.bootsteps`). 
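 An in-process worker can be started along these lines (illustrative
 sketch only; it assumes the app-bound ``WorkController`` subclass and
 a configured broker)::
 
     from celery import Celery
 
     app = Celery('proj')
     worker = app.WorkController(pool_cls='solo')
     worker.start()   # blocks until shutdown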
""" from __future__ import absolute_import -import atexit -import logging +import os import socket import sys -import time import traceback +try: + import resource +except ImportError: # pragma: no cover + resource = None # noqa -from functools import partial -from weakref import WeakValueDictionary - -from billiard.exceptions import WorkerLostError +from billiard import cpu_count from billiard.util import Finalize from kombu.syn import detect_environment +from celery import bootsteps +from celery.bootsteps import RUN, TERMINATE from celery import concurrency as _concurrency from celery import platforms from celery import signals -from celery.app import app_or_default -from celery.app.abstract import configurated, from_config -from celery.exceptions import SystemTerminate, TaskRevokedError -from celery.utils.functional import noop -from celery.utils.imports import qualname, reload_from_cwd -from celery.utils.log import get_logger -from celery.utils.threads import Event -from celery.utils.timer2 import Schedule +from celery.exceptions import ( + ImproperlyConfigured, SystemTerminate, TaskRevokedError, +) +from celery.five import string_t, values +from celery.utils import nodename, nodesplit, worker_direct +from celery.utils.imports import reload_from_cwd +from celery.utils.log import mlevel, worker_logger as logger -from . import bootsteps from . import state -from .buckets import TaskBucket, AsyncTaskBucket, FastQueue -from .hub import Hub, BoundedSemaphore -#: Worker states -RUN = 0x1 -CLOSE = 0x2 -TERMINATE = 0x3 +__all__ = ['WorkController', 'default_nodename'] -#: Default socket timeout at shutdown. -SHUTDOWN_SOCKET_TIMEOUT = 5.0 +SELECT_UNKNOWN_QUEUE = """\ +Trying to select queue subset of {0!r}, but queue {1} is not +defined in the CELERY_QUEUES setting. -logger = get_logger(__name__) +If you want to automatically declare unknown queues you can +enable the CELERY_CREATE_MISSING_QUEUES setting. +""" + +DESELECT_UNKNOWN_QUEUE = """\ +Trying to deselect queue subset of {0!r}, but queue {1} is not +defined in the CELERY_QUEUES setting. +""" -class Namespace(bootsteps.Namespace): - """This is the boot-step namespace of the :class:`WorkController`. - - It loads modules from :setting:`CELERYD_BOOT_STEPS`, and its - own set of built-in boot-step modules. - - """ - name = 'worker' - builtin_boot_steps = ('celery.worker.autoscale', - 'celery.worker.autoreload', - 'celery.worker.consumer', - 'celery.worker.mediator') - - def modules(self): - return self.builtin_boot_steps + self.app.conf.CELERYD_BOOT_STEPS +def str_to_list(s): + if isinstance(s, string_t): + return s.split(',') + return s -class Pool(bootsteps.StartStopComponent): - """The pool component. - - Describes how to initialize the worker pool, and starts and stops - the pool during worker startup/shutdown. 
- - Adds attributes: - - * autoscale - * pool - * max_concurrency - * min_concurrency - - """ - name = 'worker.pool' - requires = ('queues', 'beat', ) - - def __init__(self, w, - autoscale=None, autoreload=False, no_execv=False, **kwargs): - w.autoscale = autoscale - w.pool = None - w.max_concurrency = None - w.min_concurrency = w.concurrency - w.no_execv = no_execv - if w.autoscale: - w.max_concurrency, w.min_concurrency = w.autoscale - self.autoreload_enabled = autoreload - - def on_poll_init(self, pool, w, hub): - apply_after = hub.timer.apply_after - apply_at = hub.timer.apply_at - on_soft_timeout = pool.on_soft_timeout - on_hard_timeout = pool.on_hard_timeout - maintain_pool = pool.maintain_pool - add_reader = hub.add_reader - remove = hub.remove - now = time.time - cache = pool._pool._cache - - # did_start_ok will verify that pool processes were able to start, - # but this will only work the first time we start, as - # maxtasksperchild will mess up metrics. - if not w.consumer.restart_count and not pool.did_start_ok(): - raise WorkerLostError('Could not start worker processes') - - # need to handle pool results before every task - # since multiple tasks can be received in a single poll() - hub.on_task.append(pool.maybe_handle_result) - - hub.update_readers(pool.readers) - for handler, interval in pool.timers.iteritems(): - hub.timer.apply_interval(interval * 1000.0, handler) - - trefs = pool._tref_for_id = WeakValueDictionary() - - def _discard_tref(job): - try: - tref = trefs.pop(job) - tref.cancel() - del(tref) - except (KeyError, AttributeError): - pass # out of scope - - def _on_hard_timeout(job): - try: - result = cache[job] - except KeyError: - pass # job ready - else: - on_hard_timeout(result) - finally: - # remove tref - _discard_tref(job) - - def _on_soft_timeout(job, soft, hard, hub): - if hard: - trefs[job] = apply_at( - now() + (hard - soft), - _on_hard_timeout, (job, ), - ) - try: - result = cache[job] - except KeyError: - pass # job ready - else: - on_soft_timeout(result) - finally: - if not hard: - # remove tref - _discard_tref(job) - - def on_timeout_set(R, soft, hard): - if soft: - trefs[R._job] = apply_after( - soft * 1000.0, - _on_soft_timeout, (R._job, soft, hard, hub), - ) - elif hard: - trefs[R._job] = apply_after( - hard * 1000.0, - _on_hard_timeout, (R._job, ) - ) - - def on_timeout_cancel(R): - _discard_tref(R._job) - - pool.init_callbacks( - on_process_up=lambda w: add_reader(w.sentinel, maintain_pool), - on_process_down=lambda w: remove(w.sentinel), - on_timeout_set=on_timeout_set, - on_timeout_cancel=on_timeout_cancel, - ) - - def create(self, w, semaphore=None, max_restarts=None): - threaded = not w.use_eventloop - procs = w.min_concurrency - forking_enable = w.no_execv or not w.force_execv - if not threaded: - semaphore = w.semaphore = BoundedSemaphore(procs) - w._quick_acquire = w.semaphore.acquire - w._quick_release = w.semaphore.release - max_restarts = 100 - allow_restart = self.autoreload_enabled or w.pool_restarts - pool = w.pool = self.instantiate( - w.pool_cls, w.min_concurrency, - initargs=(w.app, w.hostname), - maxtasksperchild=w.max_tasks_per_child, - timeout=w.task_time_limit, - soft_timeout=w.task_soft_time_limit, - putlocks=w.pool_putlocks and threaded, - lost_worker_timeout=w.worker_lost_wait, - threads=threaded, - max_restarts=max_restarts, - allow_restart=allow_restart, - forking_enable=forking_enable, - semaphore=semaphore, - callbacks_propagate=( - w._conninfo.connection_errors + w._conninfo.channel_errors - ), - ) - if w.hub: - 
w.hub.on_init.append(partial(self.on_poll_init, pool, w)) - return pool +def default_nodename(hostname): + name, host = nodesplit(hostname or '') + return nodename(name or 'celery', host or socket.gethostname()) -class Beat(bootsteps.StartStopComponent): - """Component used to embed a celerybeat process. - - This will only be enabled if the ``beat`` - argument is set. - - """ - name = 'worker.beat' - - def __init__(self, w, beat=False, **kwargs): - self.enabled = w.beat = beat - w.beat = None - - def create(self, w): - from celery.beat import EmbeddedService - b = w.beat = EmbeddedService(app=w.app, - schedule_filename=w.schedule_filename, - scheduler_cls=w.scheduler_cls) - return b - - -class Queues(bootsteps.Component): - """This component initializes the internal queues - used by the worker.""" - name = 'worker.queues' - requires = ('ev', ) - - def create(self, w): - BucketType = TaskBucket - w.start_mediator = w.pool_cls.requires_mediator - if not w.pool_cls.rlimit_safe: - BucketType = AsyncTaskBucket - process_task = w.process_task - if w.use_eventloop: - BucketType = AsyncTaskBucket - if w.pool_putlocks and w.pool_cls.uses_semaphore: - process_task = w.process_task_sem - if w.disable_rate_limits or not w.start_mediator: - w.ready_queue = FastQueue() - if not w.start_mediator: - w.ready_queue.put = process_task - else: - w.ready_queue = BucketType( - task_registry=w.app.tasks, callback=process_task, worker=w, - ) - - -class EvLoop(bootsteps.StartStopComponent): - name = 'worker.ev' - - def __init__(self, w, **kwargs): - w.hub = None - - def include_if(self, w): - return w.use_eventloop - - def create(self, w): - w.timer = Schedule(max_interval=10) - hub = w.hub = Hub(w.timer) - return hub - - -class Timers(bootsteps.Component): - """This component initializes the internal timers used by the worker.""" - name = 'worker.timers' - requires = ('pool', ) - - def include_if(self, w): - return not w.use_eventloop - - def create(self, w): - if not w.timer_cls: - # Default Timer is set by the pool, as e.g. eventlet - # needs a custom implementation. - w.timer_cls = w.pool.Timer - w.timer = self.instantiate(w.pool.Timer, - max_interval=w.timer_precision, - on_timer_error=self.on_timer_error, - on_timer_tick=self.on_timer_tick) - - def on_timer_error(self, exc): - logger.error('Timer error: %r', exc, exc_info=True) - - def on_timer_tick(self, delay): - logger.debug('Timer wake-up! 
Next eta %s secs.', delay) - - -class StateDB(bootsteps.Component): - """This component sets up the workers state db if enabled.""" - name = 'worker.state-db' - - def __init__(self, w, **kwargs): - self.enabled = w.state_db - w._persistence = None - - def create(self, w): - w._persistence = state.Persistent(w.state_db) - atexit.register(w._persistence.save) - - -class WorkController(configurated): +class WorkController(object): """Unmanaged worker instance.""" - RUN = RUN - CLOSE = CLOSE - TERMINATE = TERMINATE - app = None - concurrency = from_config() - loglevel = logging.ERROR - logfile = from_config('log_file') - send_events = from_config() - pool_cls = from_config('pool') - consumer_cls = from_config('consumer') - mediator_cls = from_config('mediator') - timer_cls = from_config('timer') - timer_precision = from_config('timer_precision') - autoscaler_cls = from_config('autoscaler') - autoreloader_cls = from_config('autoreloader') - schedule_filename = from_config() - scheduler_cls = from_config('celerybeat_scheduler') - task_time_limit = from_config() - task_soft_time_limit = from_config() - max_tasks_per_child = from_config() - pool_putlocks = from_config() - pool_restarts = from_config() - force_execv = from_config() - prefetch_multiplier = from_config() - state_db = from_config() - disable_rate_limits = from_config() - worker_lost_wait = from_config() - _state = None - _running = 0 + pidlock = None + blueprint = None + pool = None + semaphore = None - def __init__(self, loglevel=None, hostname=None, ready_callback=noop, - queues=None, app=None, pidfile=None, use_eventloop=None, - **kwargs): - self.app = app_or_default(app or self.app) + class Blueprint(bootsteps.Blueprint): + """Worker bootstep blueprint.""" + name = 'Worker' + default_steps = set([ + 'celery.worker.components:Hub', + 'celery.worker.components:Queues', + 'celery.worker.components:Pool', + 'celery.worker.components:Beat', + 'celery.worker.components:Timer', + 'celery.worker.components:StateDB', + 'celery.worker.components:Consumer', + 'celery.worker.autoscale:WorkerComponent', + 'celery.worker.autoreload:WorkerComponent', - self._shutdown_complete = Event() - self.setup_defaults(kwargs, namespace='celeryd') - self.app.select_queues(queues) # select queues subset. 
+ ]) - # Options - self.loglevel = loglevel or self.loglevel - self.hostname = hostname or socket.gethostname() - self.ready_callback = ready_callback + def __init__(self, app=None, hostname=None, **kwargs): + self.app = app or self.app + self.hostname = default_nodename(hostname) + self.app.loader.init_worker() + self.on_before_init(**kwargs) + self.setup_defaults(**kwargs) + self.on_after_init(**kwargs) + + self.setup_instance(**self.prepare_args(**kwargs)) self._finalize = [ - Finalize(self, self.stop, exitpriority=1), Finalize(self, self._send_worker_shutdown, exitpriority=10), ] + + def setup_instance(self, queues=None, ready_callback=None, pidfile=None, + include=None, use_eventloop=None, exclude_queues=None, + **kwargs): self.pidfile = pidfile - self.pidlock = None + self.setup_queues(queues, exclude_queues) + self.setup_includes(str_to_list(include)) + + # Set default concurrency + if not self.concurrency: + try: + self.concurrency = cpu_count() + except NotImplementedError: + self.concurrency = 2 + + # Options + self.loglevel = mlevel(self.loglevel) + self.ready_callback = ready_callback or self.on_consumer_ready + # this connection is not established, only used for params self._conninfo = self.app.connection() self.use_eventloop = ( self.should_use_eventloop() if use_eventloop is None else use_eventloop ) + self.options = kwargs + signals.worker_init.send(sender=self) + + # Initialize bootsteps + self.pool_cls = _concurrency.get_implementation(self.pool_cls) + self.steps = [] + self.on_init_blueprint() + self.blueprint = self.Blueprint(app=self.app, + on_start=self.on_start, + on_close=self.on_close, + on_stopped=self.on_stopped) + self.blueprint.apply(self, **kwargs) + + def on_init_blueprint(self): + pass + + def on_before_init(self, **kwargs): + pass + + def on_after_init(self, **kwargs): + pass + + def on_start(self): + if self.pidfile: + self.pidlock = platforms.create_pidlock(self.pidfile) + + def on_consumer_ready(self, consumer): + pass + + def on_close(self): + self.app.loader.shutdown_worker() + + def on_stopped(self): + self.timer.stop() + self.consumer.shutdown() + + if self.pidlock: + self.pidlock.release() + + def setup_queues(self, include, exclude=None): + include = str_to_list(include) + exclude = str_to_list(exclude) + try: + self.app.amqp.queues.select(include) + except KeyError as exc: + raise ImproperlyConfigured( + SELECT_UNKNOWN_QUEUE.format(include, exc)) + try: + self.app.amqp.queues.deselect(exclude) + except KeyError as exc: + raise ImproperlyConfigured( + DESELECT_UNKNOWN_QUEUE.format(exclude, exc)) + if self.app.conf.CELERY_WORKER_DIRECT: + self.app.amqp.queues.select_add(worker_direct(self.hostname)) + + def setup_includes(self, includes): # Update celery_include to have all known task modules, so that we # ensure all task modules are imported in case an execv happens. 
+ prev = tuple(self.app.conf.CELERY_INCLUDE) + if includes: + prev += tuple(includes) + [self.app.loader.import_task_module(m) for m in includes] + self.include = includes task_modules = set(task.__class__.__module__ - for task in self.app.tasks.itervalues()) - self.app.conf.CELERY_INCLUDE = tuple( - set(self.app.conf.CELERY_INCLUDE) | task_modules, - ) + for task in values(self.app.tasks)) + self.app.conf.CELERY_INCLUDE = tuple(set(prev) | task_modules) - # Initialize boot steps - self.pool_cls = _concurrency.get_implementation(self.pool_cls) - self.components = [] - self.namespace = Namespace(app=self.app).apply(self, **kwargs) + def prepare_args(self, **kwargs): + return kwargs def _send_worker_shutdown(self): signals.worker_shutdown.send(sender=self) def start(self): """Starts the workers main loop.""" - self._state = self.RUN - if self.pidfile: - self.pidlock = platforms.create_pidlock(self.pidfile) try: - for i, component in enumerate(self.components): - logger.debug('Starting %s...', qualname(component)) - self._running = i + 1 - if component: - component.start() - logger.debug('%s OK!', qualname(component)) + self.blueprint.start(self) except SystemTerminate: self.terminate() - except Exception, exc: - logger.error('Unrecoverable error: %r', exc, - exc_info=True) + except Exception as exc: + logger.error('Unrecoverable error: %r', exc, exc_info=True) self.stop() except (KeyboardInterrupt, SystemExit): self.stop() - # Will only get here if running green, - # makes sure all greenthreads have exited. - self._shutdown_complete.wait() + def register_with_event_loop(self, hub): + self.blueprint.send_all(self, 'register_with_event_loop', args=(hub, )) - def process_task_sem(self, req): - return self._quick_acquire(self.process_task, req) + def _process_task_sem(self, req): + return self._quick_acquire(self._process_task, req) - def process_task(self, req): + def _process_task(self, req): """Process task by sending it to the pool of workers.""" try: req.execute_using_pool(self.pool) @@ -428,15 +229,9 @@ class WorkController(configurated): self._quick_release() # Issue 877 except AttributeError: pass - except Exception, exc: + except Exception as exc: logger.critical('Internal error: %r\n%s', exc, traceback.format_exc(), exc_info=True) - except SystemTerminate: - self.terminate() - raise - except BaseException, exc: - self.stop() - raise exc def signal_consumer_close(self): try: @@ -450,52 +245,24 @@ class WorkController(configurated): def stop(self, in_sighandler=False): """Graceful shutdown of the worker server.""" - self.signal_consumer_close() - if not in_sighandler or self.pool.signal_safe: - self._shutdown(warm=True) + if self.blueprint.state == RUN: + self.signal_consumer_close() + if not in_sighandler or self.pool.signal_safe: + self._shutdown(warm=True) def terminate(self, in_sighandler=False): """Not so graceful shutdown of the worker server.""" - self.signal_consumer_close() - if not in_sighandler or self.pool.signal_safe: - self._shutdown(warm=False) + if self.blueprint.state != TERMINATE: + self.signal_consumer_close() + if not in_sighandler or self.pool.signal_safe: + self._shutdown(warm=False) def _shutdown(self, warm=True): - what = 'Stopping' if warm else 'Terminating' - socket_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(SHUTDOWN_SOCKET_TIMEOUT) # Issue 975 - - if self._state in (self.CLOSE, self.TERMINATE): - return - - self.app.loader.shutdown_worker() - - if self.pool: - self.pool.close() - - if self._state != self.RUN or self._running != 
len(self.components): - # Not fully started, can safely exit. - self._state = self.TERMINATE - self._shutdown_complete.set() - return - self._state = self.CLOSE - - for component in reversed(self.components): - logger.debug('%s %s...', what, qualname(component)) - if component: - stop = component.stop - if not warm: - stop = getattr(component, 'terminate', None) or stop - stop() - - self.timer.stop() - self.consumer.close_connection() - - if self.pidlock: - self.pidlock.release() - self._state = self.TERMINATE - socket.setdefaulttimeout(socket_timeout) - self._shutdown_complete.set() + # if blueprint does not exist it means that we had an + # error before the bootsteps could be initialized. + if self.blueprint is not None: + self.blueprint.stop(self, terminate=not warm) + self.blueprint.join() def reload(self, modules=None, reload=False, reloader=None): modules = self.app.loader.task_modules if modules is None else modules @@ -510,6 +277,106 @@ reload_from_cwd(sys.modules[module], reloader) self.pool.restart() + def info(self): + return {'total': self.state.total_count, + 'pid': os.getpid(), + 'clock': str(self.app.clock)} + + def rusage(self): + if resource is None: + raise NotImplementedError('rusage not supported by this platform') + s = resource.getrusage(resource.RUSAGE_SELF) + return { + 'utime': s.ru_utime, + 'stime': s.ru_stime, + 'maxrss': s.ru_maxrss, + 'ixrss': s.ru_ixrss, + 'idrss': s.ru_idrss, + 'isrss': s.ru_isrss, + 'minflt': s.ru_minflt, + 'majflt': s.ru_majflt, + 'nswap': s.ru_nswap, + 'inblock': s.ru_inblock, + 'oublock': s.ru_oublock, + 'msgsnd': s.ru_msgsnd, + 'msgrcv': s.ru_msgrcv, + 'nsignals': s.ru_nsignals, + 'nvcsw': s.ru_nvcsw, + 'nivcsw': s.ru_nivcsw, + } + + def stats(self): + info = self.info() + info.update(self.blueprint.info(self)) + info.update(self.consumer.blueprint.info(self.consumer)) + try: + info['rusage'] = self.rusage() + except NotImplementedError: + info['rusage'] = 'N/A' + return info + + def __repr__(self): + return '<Worker: {self.hostname} ({state})>'.format( + self=self, state=self.blueprint.human_state(), + ) + + def __str__(self): + return self.hostname + @property def state(self): return state + + def setup_defaults(self, concurrency=None, loglevel=None, logfile=None, + send_events=None, pool_cls=None, consumer_cls=None, + timer_cls=None, timer_precision=None, + autoscaler_cls=None, autoreloader_cls=None, + pool_putlocks=None, pool_restarts=None, + force_execv=None, state_db=None, + schedule_filename=None, scheduler_cls=None, + task_time_limit=None, task_soft_time_limit=None, + max_tasks_per_child=None, prefetch_multiplier=None, + disable_rate_limits=None, worker_lost_wait=None, **_kw): + self.concurrency = self._getopt('concurrency', concurrency) + self.loglevel = self._getopt('log_level', loglevel) + self.logfile = self._getopt('log_file', logfile) + self.send_events = self._getopt('send_events', send_events) + self.pool_cls = self._getopt('pool', pool_cls) + self.consumer_cls = self._getopt('consumer', consumer_cls) + self.timer_cls = self._getopt('timer', timer_cls) + self.timer_precision = self._getopt('timer_precision', timer_precision) + self.autoscaler_cls = self._getopt('autoscaler', autoscaler_cls) + self.autoreloader_cls = self._getopt('autoreloader', autoreloader_cls) + self.pool_putlocks = self._getopt('pool_putlocks', pool_putlocks) + self.pool_restarts = self._getopt('pool_restarts', pool_restarts) + self.force_execv = self._getopt('force_execv', force_execv) + self.state_db = self._getopt('state_db', state_db) +
self.schedule_filename = self._getopt( + 'schedule_filename', schedule_filename, + ) + self.scheduler_cls = self._getopt( + 'celerybeat_scheduler', scheduler_cls, + ) + self.task_time_limit = self._getopt( + 'task_time_limit', task_time_limit, + ) + self.task_soft_time_limit = self._getopt( + 'task_soft_time_limit', task_soft_time_limit, + ) + self.max_tasks_per_child = self._getopt( + 'max_tasks_per_child', max_tasks_per_child, + ) + self.prefetch_multiplier = int(self._getopt( + 'prefetch_multiplier', prefetch_multiplier, + )) + self.disable_rate_limits = self._getopt( + 'disable_rate_limits', disable_rate_limits, + ) + self.worker_lost_wait = self._getopt( + 'worker_lost_wait', worker_lost_wait, + ) + + def _getopt(self, key, value): + if value is not None: + return value + return self.app.conf.find_value_for_key(key, namespace='celeryd') diff --git a/awx/lib/site-packages/celery/worker/autoreload.py b/awx/lib/site-packages/celery/worker/autoreload.py index 6d0881b50c..c5483ee350 100644 --- a/awx/lib/site-packages/celery/worker/autoreload.py +++ b/awx/lib/site-packages/celery/worker/autoreload.py @@ -6,7 +6,6 @@ This module implements automatic module reloading """ from __future__ import absolute_import -from __future__ import with_statement import hashlib import os @@ -15,15 +14,19 @@ import sys import time from collections import defaultdict +from threading import Event from kombu.utils import eventio +from kombu.utils.encoding import ensure_bytes +from celery import bootsteps +from celery.five import items from celery.platforms import ignore_errno from celery.utils.imports import module_file from celery.utils.log import get_logger -from celery.utils.threads import bgThread, Event +from celery.utils.threads import bgThread -from .bootsteps import StartStopComponent +from .components import Pool try: # pragma: no cover import pyinotify @@ -32,37 +35,38 @@ except ImportError: # pragma: no cover pyinotify = None # noqa _ProcessEvent = object # noqa +__all__ = [ + 'WorkerComponent', 'Autoreloader', 'Monitor', 'BaseMonitor', + 'StatMonitor', 'KQueueMonitor', 'InotifyMonitor', 'file_hash', +] + logger = get_logger(__name__) -class WorkerComponent(StartStopComponent): - name = 'worker.autoreloader' - requires = ('pool', ) +class WorkerComponent(bootsteps.StartStopStep): + label = 'Autoreloader' + conditional = True + requires = (Pool, ) def __init__(self, w, autoreload=None, **kwargs): self.enabled = w.autoreload = autoreload w.autoreloader = None - def create_ev(self, w): - ar = w.autoreloader = self.instantiate(w.autoreloader_cls, w) - w.hub.on_init.append(ar.on_poll_init) - w.hub.on_close.append(ar.on_poll_close) - - def create_threaded(self, w): - w.autoreloader = self.instantiate(w.autoreloader_cls, w) - return w.autoreloader - def create(self, w): - if hasattr(select, 'kqueue') and w.use_eventloop: - return self.create_ev(w) - return self.create_threaded(w) + w.autoreloader = self.instantiate(w.autoreloader_cls, w) + return w.autoreloader if not w.use_eventloop else None + + def register_with_event_loop(self, w, hub): + if hasattr(select, 'kqueue'): + w.autoreloader.register_with_event_loop(hub) + hub.on_close.add(w.autoreloader.on_event_loop_close) def file_hash(filename, algorithm='md5'): hobj = hashlib.new(algorithm) with open(filename, 'rb') as f: - for chunk in iter(lambda: f.read(1048576), ''): - hobj.update(chunk) + for chunk in iter(lambda: f.read(2 ** 20), ''): + hobj.update(ensure_bytes(chunk)) return hobj.digest() @@ -121,11 +125,11 @@ class KQueueMonitor(BaseMonitor): 
self.filemap = dict((f, None) for f in self.files) self.fdmap = {} - def on_poll_init(self, hub): + def register_with_event_loop(self, hub): self.add_events(hub.poller) hub.poller.on_file_change = self.handle_event - def on_poll_close(self, hub): + def on_event_loop_close(self, hub): self.close(hub.poller) def add_events(self, poller): @@ -145,7 +149,7 @@ class KQueueMonitor(BaseMonitor): self.poller.poll(1) def close(self, poller): - for f, fd in self.filemap.iteritems(): + for f, fd in items(self.filemap): if fd is not None: poller.unregister(fd) with ignore_errno('EBADF'): # pragma: no cover @@ -197,7 +201,6 @@ class InotifyMonitor(_ProcessEvent): def default_implementation(): - # kqueue monitor not working properly at this time. if hasattr(select, 'kqueue'): return 'kqueue' if sys.platform.startswith('linux') and pyinotify: @@ -236,14 +239,14 @@ class Autoreloader(bgThread): shutdown_event=self._is_shutdown, **self.options) self._hashes = dict([(f, file_hash(f)) for f in files]) - def on_poll_init(self, hub): + def register_with_event_loop(self, hub): if self._monitor is None: self.on_init() - self._monitor.on_poll_init(hub) + self._monitor.register_with_event_loop(hub) - def on_poll_close(self, hub): + def on_event_loop_close(self, hub): if self._monitor is not None: - self._monitor.on_poll_close(hub) + self._monitor.on_event_loop_close(hub) def body(self): self.on_init() diff --git a/awx/lib/site-packages/celery/worker/autoscale.py b/awx/lib/site-packages/celery/worker/autoscale.py index 702f7b7fd3..14afc2e95b 100644 --- a/awx/lib/site-packages/celery/worker/autoscale.py +++ b/awx/lib/site-packages/celery/worker/autoscale.py @@ -7,68 +7,64 @@ for growing and shrinking the pool according to the current autoscale settings. - The autoscale thread is only enabled if autoscale - has been enabled on the command line. + The autoscale thread is only enabled if :option:`--autoscale` + has been enabled on the command-line. """ from __future__ import absolute_import -from __future__ import with_statement import os import threading -from functools import partial -from time import sleep, time +from time import sleep +from kombu.async.semaphore import DummyLock + +from celery import bootsteps +from celery.five import monotonic from celery.utils.log import get_logger from celery.utils.threads import bgThread from . 
import state -from .bootsteps import StartStopComponent -from .hub import DummyLock +from .components import Pool + +__all__ = ['Autoscaler', 'WorkerComponent'] logger = get_logger(__name__) debug, info, error = logger.debug, logger.info, logger.error -AUTOSCALE_KEEPALIVE = int(os.environ.get('AUTOSCALE_KEEPALIVE', 30)) +AUTOSCALE_KEEPALIVE = float(os.environ.get('AUTOSCALE_KEEPALIVE', 30)) -class WorkerComponent(StartStopComponent): - name = 'worker.autoscaler' - requires = ('pool', ) +class WorkerComponent(bootsteps.StartStopStep): + label = 'Autoscaler' + conditional = True + requires = (Pool, ) def __init__(self, w, **kwargs): self.enabled = w.autoscale w.autoscaler = None - def create_threaded(self, w): - scaler = w.autoscaler = self.instantiate( - w.autoscaler_cls, - w.pool, w.max_concurrency, w.min_concurrency, - ) - return scaler - - def on_poll_init(self, scaler, hub): - hub.on_task.append(scaler.maybe_scale) - hub.timer.apply_interval(scaler.keepalive * 1000.0, scaler.maybe_scale) - - def create_ev(self, w): - scaler = w.autoscaler = self.instantiate( - w.autoscaler_cls, - w.pool, w.max_concurrency, w.min_concurrency, - mutex=DummyLock(), - ) - w.hub.on_init.append(partial(self.on_poll_init, scaler)) - def create(self, w): - return (self.create_ev if w.use_eventloop - else self.create_threaded)(w) + scaler = w.autoscaler = self.instantiate( + w.autoscaler_cls, + w.pool, w.max_concurrency, w.min_concurrency, + worker=w, mutex=DummyLock() if w.use_eventloop else None, + ) + return scaler if not w.use_eventloop else None + + def register_with_event_loop(self, w, hub): + w.consumer.on_task_message.add(w.autoscaler.maybe_scale) + hub.call_repeatedly( + w.autoscaler.keepalive, w.autoscaler.maybe_scale, + ) class Autoscaler(bgThread): def __init__(self, pool, max_concurrency, - min_concurrency=0, keepalive=AUTOSCALE_KEEPALIVE, mutex=None): + min_concurrency=0, worker=None, + keepalive=AUTOSCALE_KEEPALIVE, mutex=None): super(Autoscaler, self).__init__() self.pool = pool self.mutex = mutex or threading.Lock() @@ -76,6 +72,7 @@ class Autoscaler(bgThread): self.min_concurrency = min_concurrency self.keepalive = keepalive self._last_action = None + self.worker = worker assert self.keepalive, 'cannot scale down too fast.' 
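
The keepalive asserted above is what throttles downscaling: the pool may
only shrink again once `keepalive` seconds have passed since the last
scaling action (the next hunk also moves that bookkeeping from time() to
a monotonic clock). A simplified, self-contained sketch of the pattern,
not the vendored class itself:

    import time

    class KeepaliveThrottle(object):
        """Allow a scale-down at most once every `keepalive` seconds."""

        def __init__(self, keepalive=30.0):
            self.keepalive = keepalive
            self._last_action = None

        def scale_up(self):
            # record the time of the last scaling action
            self._last_action = time.time()

        def may_scale_down(self):
            now = time.time()
            if self._last_action and now - self._last_action > self.keepalive:
                self._last_action = now
                return True
            return False

    throttle = KeepaliveThrottle(keepalive=1.0)
    print(throttle.may_scale_down())  # False: nothing has scaled up yet
    throttle.scale_up()
    print(throttle.may_scale_down())  # False: within the keepalive window
    time.sleep(1.1)
    print(throttle.may_scale_down())  # True: the window has expired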
@@ -126,18 +123,19 @@ class Autoscaler(bgThread): self._shrink(min(n, self.processes)) def scale_up(self, n): - self._last_action = time() + self._last_action = monotonic() return self._grow(n) def scale_down(self, n): if n and self._last_action and ( - time() - self._last_action > self.keepalive): - self._last_action = time() + monotonic() - self._last_action > self.keepalive): + self._last_action = monotonic() return self._shrink(n) def _grow(self, n): info('Scaling up %s processes.', n) self.pool.grow(n) + self.worker.consumer._update_prefetch_count(n) def _shrink(self, n): info('Scaling down %s processes.', n) @@ -145,8 +143,9 @@ class Autoscaler(bgThread): self.pool.shrink(n) except ValueError: debug("Autoscaler won't scale down: all processes busy.") - except Exception, exc: + except Exception as exc: error('Autoscaler: scale_down: %r', exc, exc_info=True) + self.worker.consumer._update_prefetch_count(-n) def info(self): return {'max': self.max_concurrency, diff --git a/awx/lib/site-packages/celery/worker/bootsteps.py b/awx/lib/site-packages/celery/worker/bootsteps.py deleted file mode 100644 index 147b9a4a68..0000000000 --- a/awx/lib/site-packages/celery/worker/bootsteps.py +++ /dev/null @@ -1,211 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.bootsteps - ~~~~~~~~~~~~~~~~~~~~~~~ - - The boot-step components. - -""" -from __future__ import absolute_import - -from collections import defaultdict -from importlib import import_module - -from celery.datastructures import DependencyGraph -from celery.utils.imports import instantiate -from celery.utils.log import get_logger - -logger = get_logger(__name__) - - -class Namespace(object): - """A namespace containing components. - - Every component must belong to a namespace. - - When component classes are created they are added to the - mapping of unclaimed components. The components will be - claimed when the namespace they belong to is created. - - :keyword name: Set the name of this namespace. - :keyword app: Set the Celery app for this namespace. - - """ - name = None - _unclaimed = defaultdict(dict) - _started_count = 0 - - def __init__(self, name=None, app=None): - self.app = app - self.name = name or self.name - self.services = [] - - def modules(self): - """Subclasses can override this to return a - list of modules to import before components are claimed.""" - return [] - - def load_modules(self): - """Will load the component modules this namespace depends on.""" - for m in self.modules(): - self.import_module(m) - - def apply(self, parent, **kwargs): - """Apply the components in this namespace to an object. - - This will apply the ``__init__`` and ``include`` methods - of each components with the object as argument. - - For ``StartStopComponents`` the services created - will also be added the the objects ``components`` attribute. 
- - """ - self._debug('Loading modules.') - self.load_modules() - self._debug('Claiming components.') - self.components = self._claim() - self._debug('Building boot step graph.') - self.boot_steps = [self.bind_component(name, parent, **kwargs) - for name in self._finalize_boot_steps()] - self._debug( - 'New boot order: {%s}', ', '.join(c.name for c in self.boot_steps), - ) - - for component in self.boot_steps: - component.include(parent) - return self - - def bind_component(self, name, parent, **kwargs): - """Bind component to parent object and this namespace.""" - comp = self[name](parent, **kwargs) - comp.namespace = self - return comp - - def import_module(self, module): - return import_module(module) - - def __getitem__(self, name): - return self.components[name] - - def _find_last(self): - for C in self.components.itervalues(): - if C.last: - return C - - def _finalize_boot_steps(self): - G = self.graph = DependencyGraph( - (C.name, C.requires) for C in self.components.itervalues()) - last = self._find_last() - if last: - for obj in G: - if obj != last.name: - G.add_edge(last.name, obj) - return G.topsort() - - def _claim(self): - return self._unclaimed[self.name] - - def _debug(self, msg, *args): - return logger.debug('[%s] ' + msg, - *(self.name.capitalize(), ) + args) - - -class ComponentType(type): - """Metaclass for components.""" - - def __new__(cls, name, bases, attrs): - abstract = attrs.pop('abstract', False) - if not abstract: - try: - cname = attrs['name'] - except KeyError: - raise NotImplementedError('Components must be named') - namespace = attrs.get('namespace', None) - if not namespace: - attrs['namespace'], _, attrs['name'] = cname.partition('.') - cls = super(ComponentType, cls).__new__(cls, name, bases, attrs) - if not abstract: - Namespace._unclaimed[cls.namespace][cls.name] = cls - return cls - - -class Component(object): - """A component. - - The :meth:`__init__` method is called when the component - is bound to a parent object, and can as such be used - to initialize attributes in the parent object at - parent instantiation-time. - - """ - __metaclass__ = ComponentType - - #: The name of the component, or the namespace - #: and the name of the component separated by dot. - name = None - - #: List of component names this component depends on. - #: Note that the dependencies must be in the same namespace. - requires = () - - #: can be used to specify the namespace, - #: if the name does not include it. - namespace = None - - #: if set the component will not be registered, - #: but can be used as a component base class. - abstract = True - - #: Optional obj created by the :meth:`create` method. - #: This is used by StartStopComponents to keep the - #: original service object. - obj = None - - #: This flag is reserved for the workers Consumer, - #: since it is required to always be started last. - #: There can only be one object marked with lsat - #: in every namespace. - last = False - - #: This provides the default for :meth:`include_if`. 
- enabled = True - - def __init__(self, parent, **kwargs): - pass - - def create(self, parent): - """Create the component.""" - pass - - def include_if(self, parent): - """An optional predicate that decided whether this - component should be created.""" - return self.enabled - - def instantiate(self, qualname, *args, **kwargs): - return instantiate(qualname, *args, **kwargs) - - def include(self, parent): - if self.include_if(parent): - self.obj = self.create(parent) - return True - - -class StartStopComponent(Component): - abstract = True - terminable = False - - def start(self): - return self.obj.start() - - def stop(self): - return self.obj.stop() - - def terminate(self): - if self.terminable: - return self.obj.terminate() - return self.obj.stop() - - def include(self, parent): - if super(StartStopComponent, self).include(parent): - parent.components.append(self.obj) diff --git a/awx/lib/site-packages/celery/worker/buckets.py b/awx/lib/site-packages/celery/worker/buckets.py deleted file mode 100644 index 2d7ccc3081..0000000000 --- a/awx/lib/site-packages/celery/worker/buckets.py +++ /dev/null @@ -1,391 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.buckets - ~~~~~~~~~~~~~~~~~~~~~ - - This module implements the rate limiting of tasks, - by having a token bucket queue for each task type. - When a task is allowed to be processed it's moved - over the the ``ready_queue`` - - The :mod:`celery.worker.mediator` is then responsible - for moving tasks from the ``ready_queue`` to the worker pool. - -""" -from __future__ import absolute_import -from __future__ import with_statement - -import threading - -from collections import deque -from time import time, sleep -from Queue import Queue, Empty - -from kombu.utils.limits import TokenBucket - -from celery.utils import timeutils -from celery.utils.compat import zip_longest, chain_from_iterable - - -class RateLimitExceeded(Exception): - """The token buckets rate limit has been exceeded.""" - - -class AsyncTaskBucket(object): - - def __init__(self, task_registry, callback=None, worker=None): - self.task_registry = task_registry - self.callback = callback - self.worker = worker - self.buckets = {} - self.refresh() - self._queue = Queue() - self._quick_put = self._queue.put - self.get = self._queue.get - - def get(self, *args, **kwargs): - return self._queue.get(*args, **kwargs) - - def cont(self, request, bucket, tokens): - if not bucket.can_consume(tokens): - hold = bucket.expected_time(tokens) - self.worker.timer.apply_after( - hold * 1000.0, self.cont, (request, bucket, tokens), - ) - else: - self._quick_put(request) - - def put(self, request): - name = request.name - try: - bucket = self.buckets[name] - except KeyError: - bucket = self.add_bucket_for_type(name) - if not bucket: - return self._quick_put(request) - return self.cont(request, bucket, 1) - - def add_task_type(self, name): - task_type = self.task_registry[name] - limit = getattr(task_type, 'rate_limit', None) - limit = timeutils.rate(limit) - bucket = self.buckets[name] = ( - TokenBucket(limit, capacity=1) if limit else None - ) - return bucket - - def clear(self): - # called by the worker when the connection is lost, - # but this also clears out the timer so we be good. - pass - - def refresh(self): - for name in self.task_registry: - self.add_task_type(name) - - -class TaskBucket(object): - """This is a collection of token buckets, each task type having - its own token bucket. 
If the task type doesn't have a rate limit, - it will have a plain :class:`~Queue.Queue` object instead of a - :class:`TokenBucketQueue`. - - The :meth:`put` operation forwards the task to its appropriate bucket, - while the :meth:`get` operation iterates over the buckets and retrieves - the first available item. - - Say we have three types of tasks in the registry: `twitter.update`, - `feed.refresh` and `video.compress`, the TaskBucket will consist - of the following items:: - - {'twitter.update': TokenBucketQueue(fill_rate=300), - 'feed.refresh': Queue(), - 'video.compress': TokenBucketQueue(fill_rate=2)} - - The get operation will iterate over these until one of the buckets - is able to return an item. The underlying datastructure is a `dict`, - so the order is ignored here. - - :param task_registry: The task registry used to get the task - type class for a given task name. - - """ - - def __init__(self, task_registry, callback=None, worker=None): - self.task_registry = task_registry - self.buckets = {} - self.init_with_registry() - self.immediate = deque() - self.mutex = threading.Lock() - self.not_empty = threading.Condition(self.mutex) - self.callback = callback - self.worker = worker - - def put(self, request): - """Put a :class:`~celery.worker.job.Request` into - the appropiate bucket.""" - if request.name not in self.buckets: - self.add_bucket_for_type(request.name) - self.buckets[request.name].put_nowait(request) - with self.mutex: - self.not_empty.notify() - put_nowait = put - - def _get_immediate(self): - try: - return self.immediate.popleft() - except IndexError: - raise Empty() - - def _get(self): - # If the first bucket is always returning items, we would never - # get to fetch items from the other buckets. So we always iterate over - # all the buckets and put any ready items into a queue called - # "immediate". This queue is always checked for cached items first. - try: - return 0, self._get_immediate() - except Empty: - pass - - remaining_times = [] - for bucket in self.buckets.values(): - remaining = bucket.expected_time() - if not remaining: - try: - # Just put any ready items into the immediate queue. - self.immediate.append(bucket.get_nowait()) - except Empty: - pass - except RateLimitExceeded: - remaining_times.append(bucket.expected_time()) - else: - remaining_times.append(remaining) - - # Try the immediate queue again. - try: - return 0, self._get_immediate() - except Empty: - if not remaining_times: - # No items in any of the buckets. - raise - - # There's items, but have to wait before we can retrieve them, - # return the shortest remaining time. - return min(remaining_times), None - - def get(self, block=True, timeout=None): - """Retrive the task from the first available bucket. - - Available as in, there is an item in the queue and you can - consume tokens from it. 
- - """ - tstart = time() - get = self._get - not_empty = self.not_empty - - with not_empty: - while 1: - try: - remaining_time, item = get() - except Empty: - if not block or (timeout and time() - tstart > timeout): - raise - not_empty.wait(timeout) - continue - if remaining_time: - if not block or (timeout and time() - tstart > timeout): - raise Empty() - sleep(min(remaining_time, timeout or 1)) - else: - return item - - def get_nowait(self): - return self.get(block=False) - - def init_with_registry(self): - """Initialize with buckets for all the task types in the registry.""" - for task in self.task_registry: - self.add_bucket_for_type(task) - - def refresh(self): - """Refresh rate limits for all task types in the registry.""" - for task in self.task_registry: - self.update_bucket_for_type(task) - - def get_bucket_for_type(self, task_name): - """Get the bucket for a particular task type.""" - if task_name not in self.buckets: - return self.add_bucket_for_type(task_name) - return self.buckets[task_name] - - def _get_queue_for_type(self, task_name): - bucket = self.buckets[task_name] - if isinstance(bucket, TokenBucketQueue): - return bucket.queue - return bucket - - def update_bucket_for_type(self, task_name): - task_type = self.task_registry[task_name] - rate_limit = getattr(task_type, 'rate_limit', None) - rate_limit = timeutils.rate(rate_limit) - task_queue = FastQueue() - if task_name in self.buckets: - task_queue = self._get_queue_for_type(task_name) - else: - task_queue = FastQueue() - - if rate_limit: - task_queue = TokenBucketQueue(rate_limit, queue=task_queue) - - self.buckets[task_name] = task_queue - return task_queue - - def add_bucket_for_type(self, task_name): - """Add a bucket for a task type. - - Will read the tasks rate limit and create a :class:`TokenBucketQueue` - if it has one. If the task doesn't have a rate limit - :class:`FastQueue` will be used instead. - - """ - if task_name not in self.buckets: - return self.update_bucket_for_type(task_name) - - def qsize(self): - """Get the total size of all the queues.""" - return sum(bucket.qsize() for bucket in self.buckets.values()) - - def empty(self): - """Returns :const:`True` if all of the buckets are empty.""" - return all(bucket.empty() for bucket in self.buckets.values()) - - def clear(self): - """Delete the data in all of the buckets.""" - for bucket in self.buckets.values(): - bucket.clear() - - @property - def items(self): - """Flattens the data in all of the buckets into a single list.""" - # for queues with contents [(1, 2), (3, 4), (5, 6), (7, 8)] - # zips and flattens to [1, 3, 5, 7, 2, 4, 6, 8] - return filter(None, chain_from_iterable( - zip_longest(*[bucket.items for bucket in self.buckets.values()])), - ) - - -class FastQueue(Queue): - """:class:`Queue.Queue` supporting the interface of - :class:`TokenBucketQueue`.""" - - def clear(self): - return self.queue.clear() - - def expected_time(self, tokens=1): - return 0 - - def wait(self, block=True): - return self.get(block=block) - - @property - def items(self): - return self.queue - - -class TokenBucketQueue(object): - """Queue with rate limited get operations. - - This uses the token bucket algorithm to rate limit the queue on get - operations. - - :param fill_rate: The rate in tokens/second that the bucket will - be refilled. - :keyword capacity: Maximum number of tokens in the bucket. - Default is 1. 
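A usage sketch of the interface described above, with hypothetical values; ``get()`` raises ``RateLimitExceeded`` when the bucket is out of tokens:

    from Queue import Queue

    q = TokenBucketQueue(fill_rate=2, queue=Queue())  # ~2 gets/second
    q.put('task-a')
    try:
        item = q.get(block=False)
    except q.RateLimitExceeded:
        retry_in = q.expected_time()  # seconds until the next token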
- - """ - RateLimitExceeded = RateLimitExceeded - - def __init__(self, fill_rate, queue=None, capacity=1): - self._bucket = TokenBucket(fill_rate, capacity) - self.queue = queue - if not self.queue: - self.queue = Queue() - - def put(self, item, block=True): - """Put an item onto the queue.""" - self.queue.put(item, block=block) - - def put_nowait(self, item): - """Put an item into the queue without blocking. - - :raises Queue.Full: If a free slot is not immediately available. - - """ - return self.put(item, block=False) - - def get(self, block=True): - """Remove and return an item from the queue. - - :raises RateLimitExceeded: If a token could not be consumed from the - token bucket (consuming from the queue - too fast). - :raises Queue.Empty: If an item is not immediately available. - - """ - get = block and self.queue.get or self.queue.get_nowait - - if not block and not self.items: - raise Empty() - - if not self._bucket.can_consume(1): - raise RateLimitExceeded() - - return get() - - def get_nowait(self): - """Remove and return an item from the queue without blocking. - - :raises RateLimitExceeded: If a token could not be consumed from the - token bucket (consuming from the queue - too fast). - :raises Queue.Empty: If an item is not immediately available. - - """ - return self.get(block=False) - - def qsize(self): - """Returns the size of the queue.""" - return self.queue.qsize() - - def empty(self): - """Returns :const:`True` if the queue is empty.""" - return self.queue.empty() - - def clear(self): - """Delete all data in the queue.""" - return self.items.clear() - - def wait(self, block=False): - """Wait until a token can be retrieved from the bucket and return - the next item.""" - get = self.get - expected_time = self.expected_time - while 1: - remaining = expected_time() - if not remaining: - return get(block=block) - sleep(remaining) - - def expected_time(self, tokens=1): - """Returns the expected time in seconds of when a new token should be - available.""" - if not self.items: - return 0 - return self._bucket.expected_time(tokens) - - @property - def items(self): - """Underlying data. Do not modify.""" - return self.queue.queue diff --git a/awx/lib/site-packages/celery/worker/components.py b/awx/lib/site-packages/celery/worker/components.py new file mode 100644 index 0000000000..be355f0a46 --- /dev/null +++ b/awx/lib/site-packages/celery/worker/components.py @@ -0,0 +1,247 @@ +# -*- coding: utf-8 -*- +""" + celery.worker.components + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Default worker bootsteps. + +""" +from __future__ import absolute_import + +import atexit +import warnings + +from kombu.async import Hub as _Hub, get_event_loop, set_event_loop +from kombu.async.semaphore import DummyLock, LaxBoundedSemaphore +from kombu.async.timer import Timer as _Timer + +from celery import bootsteps +from celery._state import _set_task_join_will_block +from celery.exceptions import ImproperlyConfigured +from celery.five import string_t +from celery.utils.log import worker_logger as logger + +__all__ = ['Timer', 'Hub', 'Queues', 'Pool', 'Beat', 'StateDB', 'Consumer'] + +ERR_B_GREEN = """\ +-B option doesn't work with eventlet/gevent pools: \ +use standalone beat instead.\ +""" + +W_POOL_SETTING = """ +The CELERYD_POOL setting should not be used to select the eventlet/gevent +pools, instead you *must use the -P* argument so that patches are applied +as early as possible. 
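The warning above exists because the eventlet/gevent pools must monkey-patch the standard library before the rest of the worker is imported, and only the command line (``celery worker -P eventlet``) is parsed early enough for that. A sketch of the equivalent programmatic call, assuming a hypothetical ``proj`` app and broker URL:

    from celery import Celery

    app = Celery('proj', broker='amqp://guest@localhost//')

    if __name__ == '__main__':
        # argv[0] is taken as the program name; -P is applied early.
        app.worker_main(['worker', '-P', 'eventlet', '--concurrency=100'])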
+"""
+
+
+class Timer(bootsteps.Step):
+    """This step initializes the internal timer used by the worker."""
+
+    def create(self, w):
+        if w.use_eventloop:
+            # does not use dedicated timer thread.
+            w.timer = _Timer(max_interval=10.0)
+        else:
+            if not w.timer_cls:
+                # Default Timer is set by the pool, as e.g. eventlet
+                # needs a custom implementation.
+                w.timer_cls = w.pool_cls.Timer
+            w.timer = self.instantiate(w.timer_cls,
+                                       max_interval=w.timer_precision,
+                                       on_timer_error=self.on_timer_error,
+                                       on_timer_tick=self.on_timer_tick)
+
+    def on_timer_error(self, exc):
+        logger.error('Timer error: %r', exc, exc_info=True)
+
+    def on_timer_tick(self, delay):
+        logger.debug('Timer wake-up! Next eta %s secs.', delay)
+
+
+class Hub(bootsteps.StartStopStep):
+    requires = (Timer, )
+
+    def __init__(self, w, **kwargs):
+        w.hub = None
+
+    def include_if(self, w):
+        return w.use_eventloop
+
+    def create(self, w):
+        w.hub = get_event_loop()
+        if w.hub is None:
+            w.hub = set_event_loop(_Hub(w.timer))
+        self._patch_thread_primitives(w)
+        return self
+
+    def start(self, w):
+        pass
+
+    def stop(self, w):
+        w.hub.close()
+
+    def terminate(self, w):
+        w.hub.close()
+
+    def _patch_thread_primitives(self, w):
+        # make clock use dummy lock
+        w.app.clock.lock = DummyLock()
+        # multiprocessing's ApplyResult uses this lock.
+        try:
+            from billiard import pool
+        except ImportError:
+            pass
+        else:
+            pool.Lock = DummyLock
+
+
+class Queues(bootsteps.Step):
+    """This bootstep initializes the internal queues
+    used by the worker."""
+    label = 'Queues (intra)'
+    requires = (Hub, )
+
+    def create(self, w):
+        w.process_task = w._process_task
+        if w.use_eventloop:
+            if w.pool_putlocks and w.pool_cls.uses_semaphore:
+                w.process_task = w._process_task_sem
+
+
+class Pool(bootsteps.StartStopStep):
+    """Bootstep managing the worker pool.
+
+    Describes how to initialize the worker pool, and starts and stops
+    the pool during worker startup/shutdown.
+
+    Adds attributes:
+
+        * autoscale
+        * pool
+        * max_concurrency
+        * min_concurrency
+
+    """
+    requires = (Queues, )
+
+    def __init__(self, w, autoscale=None, autoreload=None,
+                 no_execv=False, optimization=None, **kwargs):
+        if isinstance(autoscale, string_t):
+            max_c, _, min_c = autoscale.partition(',')
+            autoscale = [int(max_c), min_c and int(min_c) or 0]
+        w.autoscale = autoscale
+        w.pool = None
+        w.max_concurrency = None
+        w.min_concurrency = w.concurrency
+        w.no_execv = no_execv
+        if w.autoscale:
+            w.max_concurrency, w.min_concurrency = w.autoscale
+        self.autoreload_enabled = autoreload
+        self.optimization = optimization
+
+    def close(self, w):
+        if w.pool:
+            w.pool.close()
+
+    def terminate(self, w):
+        if w.pool:
+            w.pool.terminate()
+
+    def create(self, w, semaphore=None, max_restarts=None):
+        if w.app.conf.CELERYD_POOL in ('eventlet', 'gevent'):
+            warnings.warn(UserWarning(W_POOL_SETTING))
+        threaded = not w.use_eventloop
+        procs = w.min_concurrency
+        forking_enable = w.no_execv if w.force_execv else True
+        if not threaded:
+            semaphore = w.semaphore = LaxBoundedSemaphore(procs)
+            w._quick_acquire = w.semaphore.acquire
+            w._quick_release = w.semaphore.release
+            max_restarts = 100
+        allow_restart = self.autoreload_enabled or w.pool_restarts
+        pool = w.pool = self.instantiate(
+            w.pool_cls, w.min_concurrency,
+            initargs=(w.app, w.hostname),
+            maxtasksperchild=w.max_tasks_per_child,
+            timeout=w.task_time_limit,
+            soft_timeout=w.task_soft_time_limit,
+            putlocks=w.pool_putlocks and threaded,
+            lost_worker_timeout=w.worker_lost_wait,
+            threads=threaded,
+            max_restarts=max_restarts,
+            allow_restart=allow_restart,
+            forking_enable=forking_enable,
+            semaphore=semaphore,
+            sched_strategy=self.optimization,
+        )
+        _set_task_join_will_block(pool.task_join_will_block)
+        return pool
+
+    def info(self, w):
+        return {'pool': w.pool.info}
+
+    def register_with_event_loop(self, w, hub):
+        w.pool.register_with_event_loop(hub)
+
+
+class Beat(bootsteps.StartStopStep):
+    """Step used to embed a beat process.
+
+    This will only be enabled if the ``beat``
+    argument is set.
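The ``autoscale`` string handling in ``Pool.__init__`` above, restated as a standalone sketch:

    def parse_autoscale(value):
        # '10,3' -> [10, 3]; '10' -> [10, 0]
        max_c, _, min_c = value.partition(',')
        return [int(max_c), min_c and int(min_c) or 0]

    assert parse_autoscale('10,3') == [10, 3]
    assert parse_autoscale('4') == [4, 0]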
+ + """ + label = 'Beat' + conditional = True + + def __init__(self, w, beat=False, **kwargs): + self.enabled = w.beat = beat + w.beat = None + + def create(self, w): + from celery.beat import EmbeddedService + if w.pool_cls.__module__.endswith(('gevent', 'eventlet')): + raise ImproperlyConfigured(ERR_B_GREEN) + b = w.beat = EmbeddedService(app=w.app, + schedule_filename=w.schedule_filename, + scheduler_cls=w.scheduler_cls) + return b + + +class StateDB(bootsteps.Step): + """This bootstep sets up the workers state db if enabled.""" + + def __init__(self, w, **kwargs): + self.enabled = w.state_db + w._persistence = None + + def create(self, w): + w._persistence = w.state.Persistent(w.state, w.state_db, w.app.clock) + atexit.register(w._persistence.save) + + +class Consumer(bootsteps.StartStopStep): + last = True + + def create(self, w): + if w.max_concurrency: + prefetch_count = max(w.min_concurrency, 1) * w.prefetch_multiplier + else: + prefetch_count = w.concurrency * w.prefetch_multiplier + c = w.consumer = self.instantiate( + w.consumer_cls, w.process_task, + hostname=w.hostname, + send_events=w.send_events, + init_callback=w.ready_callback, + initial_prefetch_count=prefetch_count, + pool=w.pool, + timer=w.timer, + app=w.app, + controller=w, + hub=w.hub, + worker_options=w.options, + disable_rate_limits=w.disable_rate_limits, + prefetch_multiplier=w.prefetch_multiplier, + ) + return c diff --git a/awx/lib/site-packages/celery/worker/consumer.py b/awx/lib/site-packages/celery/worker/consumer.py index 4d811ffed2..853b44ae45 100644 --- a/awx/lib/site-packages/celery/worker/consumer.py +++ b/awx/lib/site-packages/celery/worker/consumer.py @@ -3,123 +3,89 @@ celery.worker.consumer ~~~~~~~~~~~~~~~~~~~~~~ -This module contains the component responsible for consuming messages +This module contains the components responsible for consuming messages from the broker, processing the messages and keeping the broker connections up and running. - -* :meth:`~Consumer.start` is an infinite loop, which only iterates - again if the connection is lost. For each iteration (at start, or if the - connection is lost) it calls :meth:`~Consumer.reset_connection`, - and starts the consumer by calling :meth:`~Consumer.consume_messages`. - -* :meth:`~Consumer.reset_connection`, clears the internal queues, - establishes a new connection to the broker, sets up the task - consumer (+ QoS), and the broadcast remote control command consumer. - - Also if events are enabled it configures the event dispatcher and starts - up the heartbeat thread. - -* Finally it can consume messages. :meth:`~Consumer.consume_messages` - is simply an infinite loop waiting for events on the AMQP channels. - - Both the task consumer and the broadcast consumer uses the same - callback: :meth:`~Consumer.receive_message`. - -* So for each message received the :meth:`~Consumer.receive_message` - method is called, this checks the payload of the message for either - a `task` key or a `control` key. - - If the message is a task, it verifies the validity of the message - converts it to a :class:`celery.worker.job.Request`, and sends - it to :meth:`~Consumer.on_task`. - - If the message is a control command the message is passed to - :meth:`~Consumer.on_control`, which in turn dispatches - the control command using the control dispatcher. - - It also tries to handle malformed or invalid messages properly, - so the worker doesn't choke on them and die. 
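The initial QoS prefetch count computed by the ``Consumer`` bootstep above reduces to the following sketch; with autoscaling, the minimum concurrency (never less than one process) bounds the window:

    def initial_prefetch(concurrency, multiplier,
                         min_concurrency=0, max_concurrency=None):
        if max_concurrency:
            return max(min_concurrency, 1) * multiplier
        return concurrency * multiplier

    assert initial_prefetch(8, 4) == 32        # plain -c 8
    assert initial_prefetch(8, 4, 2, 10) == 8  # --autoscale=10,2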
Any invalid messages - are acknowledged immediately and logged, so the message is not resent - again, and again. - -* If the task has an ETA/countdown, the task is moved to the `timer` - so the :class:`timer2.Timer` can schedule it at its - deadline. Tasks without an eta are moved immediately to the `ready_queue`, - so they can be picked up by the :class:`~celery.worker.mediator.Mediator` - to be sent to the pool. - -* When a task with an ETA is received the QoS prefetch count is also - incremented, so another message can be reserved. When the ETA is met - the prefetch count is decremented again, though this cannot happen - immediately because most broker clients don't support doing broker - requests across threads. Instead the current prefetch count is kept as a - shared counter, so as soon as :meth:`~Consumer.consume_messages` - detects that the value has changed it will send out the actual - QoS event to the broker. - -* Notice that when the connection is lost all internal queues are cleared - because we can no longer ack the messages reserved in memory. - However, this is not dangerous as the broker will resend them - to another worker when the channel is closed. - -* **WARNING**: :meth:`~Consumer.stop` does not close the connection! - This is because some pre-acked messages may be in processing, - and they need to be finished before the channel is closed. - For celeryd this means the pool must finish the tasks it has acked - early, *then* close the connection. - """ from __future__ import absolute_import -from __future__ import with_statement +import errno +import kombu import logging +import os import socket -import threading +from collections import defaultdict +from functools import partial +from heapq import heappush +from operator import itemgetter from time import sleep -from Queue import Empty from billiard.common import restart_state from billiard.exceptions import RestartFreqExceeded +from kombu.async.semaphore import DummyLock +from kombu.common import QoS, ignore_errors from kombu.syn import _detect_environment -from kombu.utils.encoding import safe_repr, safe_str, bytes_t -from kombu.utils.eventio import READ, WRITE, ERR +from kombu.utils.compat import get_errno +from kombu.utils.encoding import safe_repr, bytes_t +from kombu.utils.limits import TokenBucket -from celery.app import app_or_default -from celery.datastructures import AttributeDict -from celery.exceptions import InvalidTaskError, SystemTerminate -from celery.task.trace import build_tracer -from celery.utils import text -from celery.utils import timer2 +from celery import bootsteps +from celery.app.trace import build_tracer +from celery.canvas import signature +from celery.exceptions import InvalidTaskError +from celery.five import items, values from celery.utils.functional import noop from celery.utils.log import get_logger -from celery.utils.timer2 import to_timestamp -from celery.utils.timeutils import humanize_seconds, timezone +from celery.utils.text import truncate +from celery.utils.timeutils import humanize_seconds, rate -from . import state -from .bootsteps import StartStopComponent -from .control import Panel -from .heartbeat import Heart +from . import heartbeat, loops, pidbox +from .state import task_reserved, maybe_shutdown, revoked, reserved_requests try: buffer_t = buffer except NameError: # pragma: no cover + # Py3 does not have buffer, but we only need isinstance. 
class buffer_t(object): # noqa pass -RUN = 0x1 -CLOSE = 0x2 +__all__ = [ + 'Consumer', 'Connection', 'Events', 'Heart', 'Control', + 'Tasks', 'Evloop', 'Agent', 'Mingle', 'Gossip', 'dump_body', +] -#: Prefetch count can't exceed short. -PREFETCH_COUNT_MAX = 0xFFFF +CLOSE = bootsteps.CLOSE +logger = get_logger(__name__) +debug, info, warn, error, crit = (logger.debug, logger.info, logger.warning, + logger.error, logger.critical) + +CONNECTION_RETRY = """\ +consumer: Connection to broker lost. \ +Trying to re-establish the connection...\ +""" + +CONNECTION_RETRY_STEP = """\ +Trying again {when}...\ +""" + +CONNECTION_ERROR = """\ +consumer: Cannot connect to %s: %s. +%s +""" + +CONNECTION_FAILOVER = """\ +Will retry using next failover.\ +""" UNKNOWN_FORMAT = """\ Received and deleted unknown message. Wrong destination?!? The full contents of the message body was: %s """ + #: Error message for when an unregistered task is received. UNKNOWN_TASK_ERROR = """\ Received unregistered task of type %s. @@ -127,7 +93,7 @@ The message has been ignored and discarded. Did you remember to import the module containing this task? Or maybe you are using relative imports? -More: http://docs.celeryq.org/en/latest/userguide/tasks.html#names +Please see http://bit.ly/gLye1c for more information. The full contents of the message body was: %s @@ -138,184 +104,37 @@ INVALID_TASK_ERROR = """\ Received invalid task message: %s The message has been ignored and discarded. -Please ensure your message conforms to the task message format: -http://docs.celeryq.org/en/latest/internals/protocol.html +Please ensure your message conforms to the task +message protocol as described here: http://bit.ly/hYj41y The full contents of the message body was: %s """ MESSAGE_REPORT = """\ -body: %s {content_type:%s content_encoding:%s delivery_info:%s}\ +body: {0} {{content_type:{1} content_encoding:{2} delivery_info:{3}}}\ """ - -RETRY_CONNECTION = """\ -consumer: Connection to broker lost. \ -Trying to re-establish the connection...\ -""" - -CONNECTION_ERROR = """\ -consumer: Cannot connect to %s: %s. -%s -""" - -CONNECTION_RETRY = """\ -Trying again %(when)s...\ -""" - -CONNECTION_FAILOVER = """\ -Will retry using next failover.\ -""" - -task_reserved = state.task_reserved - -logger = get_logger(__name__) -info, warn, error, crit = (logger.info, logger.warning, - logger.error, logger.critical) - - -def debug(msg, *args, **kwargs): - logger.debug('consumer: %s' % (msg, ), *args, **kwargs) +MINGLE_GET_FIELDS = itemgetter('clock', 'revoked') def dump_body(m, body): if isinstance(body, buffer_t): - body = bytes_t(body) - return "%s (%sb)" % (text.truncate(safe_repr(body), 1024), len(m.body)) - - -class Component(StartStopComponent): - name = 'worker.consumer' - last = True - - def Consumer(self, w): - return (w.consumer_cls or - Consumer if w.hub else BlockingConsumer) - - def create(self, w): - prefetch_count = w.concurrency * w.prefetch_multiplier - c = w.consumer = self.instantiate( - self.Consumer(w), - w.ready_queue, - hostname=w.hostname, - send_events=w.send_events, - init_callback=w.ready_callback, - initial_prefetch_count=prefetch_count, - pool=w.pool, - timer=w.timer, - app=w.app, - controller=w, - hub=w.hub, - ) - return c - - -class QoS(object): - """Thread safe increment/decrement of a channels prefetch_count. - - :param consumer: A :class:`kombu.messaging.Consumer` instance. - :param initial_value: Initial prefetch count value. 
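The patch replaces this class with :class:`kombu.common.QoS`; in isolation the eventual-update pattern looks like this (a sketch using a print callback in place of a real channel):

    from kombu.common import QoS

    def set_prefetch(prefetch_count=0):
        print('basic.qos -> %s' % prefetch_count)

    qos = QoS(set_prefetch, initial_value=4)
    qos.increment_eventually()   # e.g. an ETA task was reserved
    qos.decrement_eventually()   # ...and later came due
    qos.update()                 # only the main loop talks to the broker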
- - """ - prev = None - - def __init__(self, consumer, initial_value): - self.consumer = consumer - self._mutex = threading.RLock() - self.value = initial_value or 0 - - def increment_eventually(self, n=1): - """Increment the value, but do not update the channels QoS. - - The MainThread will be responsible for calling :meth:`update` - when necessary. - - """ - with self._mutex: - if self.value: - self.value = self.value + max(n, 0) - return self.value - - def decrement_eventually(self, n=1): - """Decrement the value, but do not update the channels QoS. - - The MainThread will be responsible for calling :meth:`update` - when necessary. - - """ - with self._mutex: - if self.value: - self.value -= n - return self.value - - def set(self, pcount): - """Set channel prefetch_count setting.""" - if pcount != self.prev: - new_value = pcount - if pcount > PREFETCH_COUNT_MAX: - warn('QoS: Disabled: prefetch_count exceeds %r', - PREFETCH_COUNT_MAX) - new_value = 0 - debug('basic.qos: prefetch_count->%s', new_value) - self.consumer.qos(prefetch_count=new_value) - self.prev = pcount - return pcount - - def update(self): - """Update prefetch count with current value.""" - with self._mutex: - return self.set(self.value) + body = bytes_t(buffer) + return '{0} ({1}b)'.format(truncate(safe_repr(body), 1024), + len(m.body)) class Consumer(object): - """Listen for messages received from the broker and - move them to the ready queue for task processing. + Strategies = dict - :param ready_queue: See :attr:`ready_queue`. - :param timer: See :attr:`timer`. + #: set when consumer is shutting down. + in_shutdown = False - """ - - #: The queue that holds tasks ready for immediate processing. - ready_queue = None - - #: Enable/disable events. - send_events = False - - #: Optional callback to be called when the connection is established. - #: Will only be called once, even if the connection is lost and - #: re-established. + #: Optional callback called the first time the worker + #: is ready to receive tasks. init_callback = None - #: The current hostname. Defaults to the system hostname. - hostname = None - - #: Initial QoS prefetch count for the task channel. - initial_prefetch_count = 0 - - #: A :class:`celery.events.EventDispatcher` for sending events. - event_dispatcher = None - - #: The thread that sends event heartbeats at regular intervals. - #: The heartbeats are used by monitors to detect that a worker - #: went offline/disappeared. - heart = None - - #: The broker connection. - connection = None - - #: The consumer used to consume task messages. - task_consumer = None - - #: The consumer used to consume broadcast commands. - broadcast_consumer = None - - #: The process mailbox (kombu pidbox node). - pidbox_node = None - _pidbox_node_shutdown = None # used for greenlets - _pidbox_node_stopped = None # used for greenlets - #: The current worker pool instance. pool = None @@ -323,354 +142,170 @@ class Consumer(object): #: as sending heartbeats. timer = None - # Consumer state, can be RUN or CLOSE. 
- _state = None - restart_count = -1 # first start is the same as a restart - def __init__(self, ready_queue, - init_callback=noop, send_events=False, hostname=None, - initial_prefetch_count=2, pool=None, app=None, + class Blueprint(bootsteps.Blueprint): + name = 'Consumer' + default_steps = [ + 'celery.worker.consumer:Connection', + 'celery.worker.consumer:Mingle', + 'celery.worker.consumer:Events', + 'celery.worker.consumer:Gossip', + 'celery.worker.consumer:Heart', + 'celery.worker.consumer:Control', + 'celery.worker.consumer:Tasks', + 'celery.worker.consumer:Evloop', + 'celery.worker.consumer:Agent', + ] + + def shutdown(self, parent): + self.send_all(parent, 'shutdown') + + def __init__(self, on_task_request, + init_callback=noop, hostname=None, + pool=None, app=None, timer=None, controller=None, hub=None, amqheartbeat=None, - **kwargs): - self.app = app_or_default(app) - self.connection = None - self.task_consumer = None + worker_options=None, disable_rate_limits=False, + initial_prefetch_count=2, prefetch_multiplier=1, **kwargs): + self.app = app self.controller = controller - self.broadcast_consumer = None - self.ready_queue = ready_queue - self.send_events = send_events self.init_callback = init_callback self.hostname = hostname or socket.gethostname() - self.initial_prefetch_count = initial_prefetch_count - self.event_dispatcher = None - self.heart = None + self.pid = os.getpid() self.pool = pool - self.timer = timer or timer2.default_timer - pidbox_state = AttributeDict(app=self.app, - hostname=self.hostname, - listener=self, # pre 2.2 - consumer=self) - self.pidbox_node = self.app.control.mailbox.Node( - safe_str(self.hostname), state=pidbox_state, handlers=Panel.data, - ) + self.timer = timer + self.strategies = self.Strategies() conninfo = self.app.connection() self.connection_errors = conninfo.connection_errors self.channel_errors = conninfo.channel_errors self._restart_state = restart_state(maxR=5, maxT=1) self._does_info = logger.isEnabledFor(logging.INFO) - self.strategies = {} + self.on_task_request = on_task_request + self.on_task_message = set() + self.amqheartbeat_rate = self.app.conf.BROKER_HEARTBEAT_CHECKRATE + self.disable_rate_limits = disable_rate_limits + self.initial_prefetch_count = initial_prefetch_count + self.prefetch_multiplier = prefetch_multiplier + + # this contains a tokenbucket for each task type by name, used for + # rate limits, or None if rate limits are disabled for that task. + self.task_buckets = defaultdict(lambda: None) + self.reset_rate_limits() + if hub: - hub.on_init.append(self.on_poll_init) - self.hub = hub - self._quick_put = self.ready_queue.put - self.amqheartbeat = amqheartbeat - if self.amqheartbeat is None: - self.amqheartbeat = self.app.conf.BROKER_HEARTBEAT - if not hub: + self.amqheartbeat = amqheartbeat + if self.amqheartbeat is None: + self.amqheartbeat = self.app.conf.BROKER_HEARTBEAT + self.hub = hub + else: + self.hub = None self.amqheartbeat = 0 + if not hasattr(self, 'loop'): + self.loop = loops.asynloop if hub else loops.synloop + if _detect_environment() == 'gevent': # there's a gevent bug that causes timeouts to not be reset, # so if the connection timeout is exceeded once, it can NEVER # connect again. 
self.app.conf.BROKER_CONNECTION_TIMEOUT = None - def update_strategies(self): - S = self.strategies - app = self.app - loader = app.loader - hostname = self.hostname - for name, task in self.app.tasks.iteritems(): - S[name] = task.start_strategy(app, self) - task.__trace__ = build_tracer(name, task, loader, hostname) + self.steps = [] + self.blueprint = self.Blueprint( + app=self.app, on_close=self.on_close, + ) + self.blueprint.apply(self, **dict(worker_options or {}, **kwargs)) + + def bucket_for_task(self, type): + limit = rate(getattr(type, 'rate_limit', None)) + return TokenBucket(limit, capacity=1) if limit else None + + def reset_rate_limits(self): + self.task_buckets.update( + (n, self.bucket_for_task(t)) for n, t in items(self.app.tasks) + ) + + def _update_prefetch_count(self, index=0): + """Update prefetch count after pool/shrink grow operations. + + Index must be the change in number of processes as a postive + (increasing) or negative (decreasing) number. + + .. note:: + + Currently pool grow operations will end up with an offset + of +1 if the initial size of the pool was 0 (e.g. + ``--autoscale=1,0``). + + """ + num_processes = self.pool.num_processes + if not self.initial_prefetch_count or not num_processes: + return # prefetch disabled + self.initial_prefetch_count = ( + self.pool.num_processes * self.prefetch_multiplier + ) + return self._update_qos_eventually(index) + + def _update_qos_eventually(self, index): + return (self.qos.decrement_eventually if index < 0 + else self.qos.increment_eventually)( + abs(index) * self.prefetch_multiplier) + + def _limit_task(self, request, bucket, tokens): + if not bucket.can_consume(tokens): + hold = bucket.expected_time(tokens) + self.timer.call_after( + hold, self._limit_task, (request, bucket, tokens), + ) + else: + task_reserved(request) + self.on_task_request(request) def start(self): - """Start the consumer. - - Automatically survives intermittent connection failure, - and will retry establishing the connection and restart - consuming messages. 
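``bucket_for_task()`` and ``_limit_task()`` above combine into a small pattern; a standalone sketch (the rate string is hypothetical):

    from kombu.utils.limits import TokenBucket
    from celery.utils.timeutils import rate

    limit = rate('10/m')  # rate strings become tokens/second (~0.167)
    bucket = TokenBucket(limit, capacity=1) if limit else None

    if bucket is None or bucket.can_consume(1):
        print('execute now')
    else:
        # the consumer re-schedules itself on the timer instead of blocking
        print('retry in %.2fs' % bucket.expected_time(1))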
- - """ - - self.init_callback(self) - - while self._state != CLOSE: + blueprint, loop = self.blueprint, self.loop + while blueprint.state != CLOSE: self.restart_count += 1 - self.maybe_shutdown() + maybe_shutdown() try: - self._restart_state.step() - except RestartFreqExceeded as exc: - crit('Frequent restarts detected: %r', exc, exc_info=1) - sleep(1) - try: - self.reset_connection() - self.consume_messages() - except self.connection_errors + self.channel_errors: - error(RETRY_CONNECTION, exc_info=True) - - def on_poll_init(self, hub): - hub.update_readers(self.connection.eventmap) - self.connection.transport.on_poll_init(hub.poller) - - def consume_messages(self, sleep=sleep, min=min, Empty=Empty): - """Consume messages forever (or until an exception is raised).""" - hbrate = self.app.conf.BROKER_HEARTBEAT_CHECKRATE - - with self.hub as hub: - qos = self.qos - update_qos = qos.update - update_readers = hub.update_readers - readers, writers = hub.readers, hub.writers - poll = hub.poller.poll - fire_timers = hub.fire_timers - scheduled = hub.timer._queue - connection = self.connection - hb = self.amqheartbeat - hbtick = connection.heartbeat_check - on_poll_start = connection.transport.on_poll_start - on_poll_empty = connection.transport.on_poll_empty - strategies = self.strategies - drain_nowait = connection.drain_nowait - on_task_callbacks = hub.on_task - keep_draining = connection.transport.nb_keep_draining - errors = connection.connection_errors - - if hb and connection.supports_heartbeats: - hub.timer.apply_interval( - hb * 1000.0 / hbrate, hbtick, (hbrate, )) - - def on_task_received(body, message): - if on_task_callbacks: - [callback() for callback in on_task_callbacks] + blueprint.start(self) + except self.connection_errors as exc: + if isinstance(exc, OSError) and get_errno(exc) == errno.EMFILE: + raise # Too many open files + maybe_shutdown() try: - name = body['task'] - except (KeyError, TypeError): - return self.handle_unknown_message(body, message) - try: - strategies[name](message, body, message.ack_log_error) - except KeyError, exc: - self.handle_unknown_task(body, message, exc) - except InvalidTaskError, exc: - self.handle_invalid_task(body, message, exc) + self._restart_state.step() + except RestartFreqExceeded as exc: + crit('Frequent restarts detected: %r', exc, exc_info=1) + sleep(1) + if blueprint.state != CLOSE and self.connection: + warn(CONNECTION_RETRY, exc_info=True) + try: + self.connection.collect() + except Exception: + pass + self.on_close() + blueprint.restart(self) - self.task_consumer.callbacks = [on_task_received] - self.task_consumer.consume() + def register_with_event_loop(self, hub): + self.blueprint.send_all(self, 'register_with_event_loop', args=(hub, )) - debug('Ready to accept tasks!') + def shutdown(self): + self.in_shutdown = True + self.blueprint.shutdown(self) - while self._state != CLOSE and self.connection: - # shutdown if signal handlers told us to. - if state.should_stop: - raise SystemExit() - elif state.should_terminate: - raise SystemTerminate() + def stop(self): + self.blueprint.stop(self) - # fire any ready timers, this also returns - # the number of seconds until we need to fire timers again. - poll_timeout = (fire_timers(propagate=errors) if scheduled - else 1) + def on_ready(self): + callback, self.init_callback = self.init_callback, None + if callback: + callback(self) - # We only update QoS when there is no more messages to read. 
- # This groups together qos calls, and makes sure that remote - # control commands will be prioritized over task messages. - if qos.prev != qos.value: - update_qos() - - update_readers(on_poll_start()) - if readers or writers: - connection.more_to_read = True - while connection.more_to_read: - try: - events = poll(poll_timeout) - except ValueError: # Issue 882 - return - if not events: - on_poll_empty() - for fileno, event in events or (): - try: - if event & READ: - readers[fileno](fileno, event) - if event & WRITE: - writers[fileno](fileno, event) - if event & ERR: - for handlermap in readers, writers: - try: - handlermap[fileno](fileno, event) - except KeyError: - pass - except (KeyError, Empty): - continue - except socket.error: - if self._state != CLOSE: # pragma: no cover - raise - if keep_draining: - drain_nowait() - poll_timeout = 0 - else: - connection.more_to_read = False - else: - # no sockets yet, startup is probably not done. - sleep(min(poll_timeout, 0.1)) - - def on_task(self, task, task_reserved=task_reserved, - to_system_tz=timezone.to_system): - """Handle received task. - - If the task has an `eta` we enter it into the ETA schedule, - otherwise we move it the ready queue for immediate processing. - - """ - if task.revoked(): - return - - if self._does_info: - info('Got task from broker: %s', task) - - if self.event_dispatcher.enabled: - self.event_dispatcher.send( - 'task-received', - uuid=task.id, name=task.name, - args=safe_repr(task.args), kwargs=safe_repr(task.kwargs), - retries=task.request_dict.get('retries', 0), - eta=task.eta and task.eta.isoformat(), - expires=task.expires and task.expires.isoformat(), - ) - - if task.eta: - try: - if task.utc: - eta = to_timestamp(to_system_tz(task.eta)) - else: - eta = to_timestamp(task.eta, timezone.local) - except OverflowError, exc: - error("Couldn't convert eta %s to timestamp: %r. Task: %r", - task.eta, exc, task.info(safe=True), exc_info=True) - task.acknowledge() - else: - self.qos.increment_eventually() - self.timer.apply_at( - eta, self.apply_eta_task, (task, ), priority=6, - ) - else: - task_reserved(task) - self._quick_put(task) - - def on_control(self, body, message): - """Process remote control command message.""" - try: - self.pidbox_node.handle_message(body, message) - except KeyError, exc: - error('No such control command: %s', exc) - except Exception, exc: - error('Control command error: %r', exc, exc_info=True) - self.reset_pidbox_node() - - def apply_eta_task(self, task): - """Method called by the timer to apply a task with an - ETA/countdown.""" - task_reserved(task) - self._quick_put(task) - self.qos.decrement_eventually() - - def _message_report(self, body, message): - return MESSAGE_REPORT % (dump_body(message, body), - safe_repr(message.content_type), - safe_repr(message.content_encoding), - safe_repr(message.delivery_info)) - - def handle_unknown_message(self, body, message): - warn(UNKNOWN_FORMAT, self._message_report(body, message)) - message.reject_log_error(logger, self.connection_errors) - - def handle_unknown_task(self, body, message, exc): - error(UNKNOWN_TASK_ERROR, exc, dump_body(message, body), exc_info=True) - message.reject_log_error(logger, self.connection_errors) - - def handle_invalid_task(self, body, message, exc): - error(INVALID_TASK_ERROR, exc, dump_body(message, body), exc_info=True) - message.reject_log_error(logger, self.connection_errors) - - def receive_message(self, body, message): - """Handles incoming messages. - - :param body: The message body. 
- :param message: The kombu message object. - - """ - try: - name = body['task'] - except (KeyError, TypeError): - return self.handle_unknown_message(body, message) - - try: - self.strategies[name](message, body, message.ack_log_error) - except KeyError, exc: - self.handle_unknown_task(body, message, exc) - except InvalidTaskError, exc: - self.handle_invalid_task(body, message, exc) - - def maybe_conn_error(self, fun): - """Applies function but ignores any connection or channel - errors raised.""" - try: - fun() - except (AttributeError, ) + \ - self.connection_errors + \ - self.channel_errors: - pass - - def close_connection(self): - """Closes the current broker connection and all open channels.""" - - # We must set self.connection to None here, so - # that the green pidbox thread exits. - connection, self.connection = self.connection, None - - if self.task_consumer: - debug('Closing consumer channel...') - self.task_consumer = self.maybe_conn_error( - self.task_consumer.close) - - self.stop_pidbox_node() - - if connection: - debug('Closing broker connection...') - self.maybe_conn_error(connection.close) - - def stop_consumers(self, close_connection=True, join=True): - """Stop consuming tasks and broadcast commands, also stops - the heartbeat thread and event dispatcher. - - :keyword close_connection: Set to False to skip closing the broker - connection. - - """ - if not self._state == RUN: - return - - if self.heart: - # Stop the heartbeat thread if it's running. - debug('Heart: Going into cardiac arrest...') - self.heart = self.heart.stop() - - debug('Cancelling task consumer...') - if join and self.task_consumer: - self.maybe_conn_error(self.task_consumer.cancel) - - if self.event_dispatcher: - debug('Shutting down event dispatcher...') - self.event_dispatcher = self.maybe_conn_error( - self.event_dispatcher.close) - - debug('Cancelling broadcast consumer...') - if join and self.broadcast_consumer: - self.maybe_conn_error(self.broadcast_consumer.cancel) - - if close_connection: - self.close_connection() + def loop_args(self): + return (self, self.connection, self.task_consumer, + self.blueprint, self.hub, self.qos, self.amqheartbeat, + self.app.clock, self.amqheartbeat_rate) def on_decode_error(self, message, exc): """Callback called if an error occurs while decoding @@ -688,113 +323,19 @@ class Consumer(object): dump_body(message, message.body), exc_info=1) message.ack() - def reset_pidbox_node(self): - """Sets up the process mailbox.""" - self.stop_pidbox_node() - # close previously opened channel if any. 
- if self.pidbox_node.channel: - try: - self.pidbox_node.channel.close() - except self.connection_errors + self.channel_errors: - pass - - if self.pool is not None and self.pool.is_green: - return self.pool.spawn_n(self._green_pidbox_node) - self.pidbox_node.channel = self.connection.channel() - self.broadcast_consumer = self.pidbox_node.listen( - callback=self.on_control, - ) - - def stop_pidbox_node(self): - if self._pidbox_node_stopped: - self._pidbox_node_shutdown.set() - debug('Waiting for broadcast thread to shutdown...') - self._pidbox_node_stopped.wait() - self._pidbox_node_stopped = self._pidbox_node_shutdown = None - elif self.broadcast_consumer: - debug('Closing broadcast channel...') - self.broadcast_consumer = \ - self.maybe_conn_error(self.broadcast_consumer.channel.close) - - def _green_pidbox_node(self): - """Sets up the process mailbox when running in a greenlet - environment.""" - # THIS CODE IS TERRIBLE - # Luckily work has already started rewriting the Consumer for 4.0. - self._pidbox_node_shutdown = threading.Event() - self._pidbox_node_stopped = threading.Event() - try: - with self._open_connection() as conn: - info('pidbox: Connected to %s.', conn.as_uri()) - self.pidbox_node.channel = conn.default_channel - self.broadcast_consumer = self.pidbox_node.listen( - callback=self.on_control, - ) - - with self.broadcast_consumer: - while not self._pidbox_node_shutdown.isSet(): - try: - conn.drain_events(timeout=1.0) - except socket.timeout: - pass - finally: - self._pidbox_node_stopped.set() - - def reset_connection(self): - """Re-establish the broker connection and set up consumers, - heartbeat and the event dispatcher.""" - debug('Re-establishing connection to the broker...') - self.stop_consumers(join=False) - + def on_close(self): # Clear internal queues to get rid of old messages. # They can't be acked anyway, as a delivery tag is specific # to the current channel. - self.ready_queue.clear() - self.timer.clear() - state.reserved_requests.clear() + if self.controller and self.controller.semaphore: + self.controller.semaphore.clear() + if self.timer: + self.timer.clear() + reserved_requests.clear() + if self.pool and self.pool.flush: + self.pool.flush() - # Re-establish the broker connection and setup the task consumer. - self.connection = self._open_connection() - info('consumer: Connected to %s.', self.connection.as_uri()) - self.task_consumer = self.app.amqp.TaskConsumer( - self.connection, on_decode_error=self.on_decode_error, - ) - # QoS: Reset prefetch window. - self.qos = QoS(self.task_consumer, self.initial_prefetch_count) - self.qos.update() - - # Setup the process mailbox. - self.reset_pidbox_node() - - # Flush events sent while connection was down. - prev_event_dispatcher = self.event_dispatcher - self.event_dispatcher = self.app.events.Dispatcher( - self.connection, hostname=self.hostname, enabled=self.send_events, - ) - if prev_event_dispatcher: - self.event_dispatcher.copy_buffer(prev_event_dispatcher) - self.event_dispatcher.flush() - - # Restart heartbeat thread. - self.restart_heartbeat() - - # reload all task's execution strategies. - self.update_strategies() - - # We're back! - self._state = RUN - - def restart_heartbeat(self): - """Restart the heartbeat thread. - - This thread sends heartbeat events at intervals so monitors - can tell if the worker is off-line/missing. - - """ - self.heart = Heart(self.timer, self.event_dispatcher) - self.heart.start() - - def _open_connection(self): + def connect(self): """Establish the broker connection. 
Will retry establishing the connection if the @@ -805,45 +346,26 @@ class Consumer(object): # Callback called for each retry while the connection # can't be established. - def _error_handler(exc, interval, next_step=CONNECTION_RETRY): + def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP): if getattr(conn, 'alt', None) and interval == 0: next_step = CONNECTION_FAILOVER error(CONNECTION_ERROR, conn.as_uri(), exc, - next_step % {'when': humanize_seconds(interval, 'in', ' ')}) + next_step.format(when=humanize_seconds(interval, 'in', ' '))) # remember that the connection is lazy, it won't establish - # until it's needed. + # until needed. if not self.app.conf.BROKER_CONNECTION_RETRY: # retry disabled, just call connect directly. conn.connect() return conn - return conn.ensure_connection( + conn = conn.ensure_connection( _error_handler, self.app.conf.BROKER_CONNECTION_MAX_RETRIES, - callback=self.maybe_shutdown, + callback=maybe_shutdown, ) - - def stop(self): - """Stop consuming. - - Does not close the broker connection, so be sure to call - :meth:`close_connection` when you are finished with it. - - """ - # Notifies other threads that this instance can't be used - # anymore. - self.close() - debug('Stopping consumers...') - self.stop_consumers(close_connection=False, join=True) - - def close(self): - self._state = CLOSE - - def maybe_shutdown(self): - if state.should_stop: - raise SystemExit() - elif state.should_terminate: - raise SystemTerminate() + if self.hub: + conn.transport.register_with_event_loop(conn.connection, self.hub) + return conn def add_task_queue(self, queue, exchange=None, exchange_type=None, routing_key=None, **options): @@ -865,45 +387,383 @@ class Consumer(object): if not cset.consuming_from(queue): cset.add_queue(q) cset.consume() - logger.info('Started consuming from %r', queue) + info('Started consuming from %r', queue) def cancel_task_queue(self, queue): - self.app.amqp.queues.select_remove(queue) + self.app.amqp.queues.deselect(queue) self.task_consumer.cancel_by_queue(queue) - @property - def info(self): - """Returns information about this consumer instance - as a dict. + def apply_eta_task(self, task): + """Method called by the timer to apply a task with an + ETA/countdown.""" + task_reserved(task) + self.on_task_request(task) + self.qos.decrement_eventually() - This is also the consumer related info returned by - ``celeryctl stats``. + def _message_report(self, body, message): + return MESSAGE_REPORT.format(dump_body(message, body), + safe_repr(message.content_type), + safe_repr(message.content_encoding), + safe_repr(message.delivery_info)) - """ - conninfo = {} - if self.connection: - conninfo = self.connection.info() - conninfo.pop('password', None) # don't send password. - return {'broker': conninfo, 'prefetch_count': self.qos.value} + def on_unknown_message(self, body, message): + warn(UNKNOWN_FORMAT, self._message_report(body, message)) + message.reject_log_error(logger, self.connection_errors) + def on_unknown_task(self, body, message, exc): + error(UNKNOWN_TASK_ERROR, exc, dump_body(message, body), exc_info=True) + message.reject_log_error(logger, self.connection_errors) -class BlockingConsumer(Consumer): + def on_invalid_task(self, body, message, exc): + error(INVALID_TASK_ERROR, exc, dump_body(message, body), exc_info=True) + message.reject_log_error(logger, self.connection_errors) - def consume_messages(self): - # receive_message handles incoming messages. 
- self.task_consumer.register_callback(self.receive_message) - self.task_consumer.consume() + def update_strategies(self): + loader = self.app.loader + for name, task in items(self.app.tasks): + self.strategies[name] = task.start_strategy(self.app, self) + task.__trace__ = build_tracer(name, task, loader, self.hostname, + app=self.app) - debug('Ready to accept tasks!') + def create_task_handler(self): + strategies = self.strategies + on_unknown_message = self.on_unknown_message + on_unknown_task = self.on_unknown_task + on_invalid_task = self.on_invalid_task + callbacks = self.on_task_message - while self._state != CLOSE and self.connection: - self.maybe_shutdown() - if self.qos.prev != self.qos.value: # pragma: no cover - self.qos.update() + def on_task_received(body, message): try: - self.connection.drain_events(timeout=10.0) - except socket.timeout: - pass - except socket.error: - if self._state != CLOSE: # pragma: no cover - raise + name = body['task'] + except (KeyError, TypeError): + return on_unknown_message(body, message) + + try: + strategies[name](message, body, + message.ack_log_error, + message.reject_log_error, + callbacks) + except KeyError as exc: + on_unknown_task(body, message, exc) + except InvalidTaskError as exc: + on_invalid_task(body, message, exc) + + return on_task_received + + def __repr__(self): + return ''.format( + self=self, state=self.blueprint.human_state(), + ) + + +class Connection(bootsteps.StartStopStep): + + def __init__(self, c, **kwargs): + c.connection = None + + def start(self, c): + c.connection = c.connect() + info('Connected to %s', c.connection.as_uri()) + + def shutdown(self, c): + # We must set self.connection to None here, so + # that the green pidbox thread exits. + connection, c.connection = c.connection, None + if connection: + ignore_errors(connection, connection.close) + + def info(self, c): + info = c.connection.info() + info.pop('password', None) # don't send password. + return {'broker': info} + + +class Events(bootsteps.StartStopStep): + requires = (Connection, ) + + def __init__(self, c, send_events=None, **kwargs): + self.send_events = True + self.groups = None if send_events else ['worker'] + c.event_dispatcher = None + + def start(self, c): + # flush events sent while connection was down. 
+ prev = c.event_dispatcher + dis = c.event_dispatcher = c.app.events.Dispatcher( + c.connect(), hostname=c.hostname, + enabled=self.send_events, groups=self.groups, + ) + if prev: + dis.extend_buffer(prev) + dis.flush() + + def stop(self, c): + if c.event_dispatcher: + # remember changes from remote control commands: + self.groups = c.event_dispatcher.groups + + # close custom connection + if c.event_dispatcher.connection: + ignore_errors(c, c.event_dispatcher.connection.close) + ignore_errors(c, c.event_dispatcher.close) + c.event_dispatcher = None + shutdown = stop + + +class Heart(bootsteps.StartStopStep): + requires = (Events, ) + + def __init__(self, c, without_heartbeat=False, **kwargs): + self.enabled = not without_heartbeat + c.heart = None + + def start(self, c): + c.heart = heartbeat.Heart(c.timer, c.event_dispatcher) + c.heart.start() + + def stop(self, c): + c.heart = c.heart and c.heart.stop() + shutdown = stop + + +class Control(bootsteps.StartStopStep): + requires = (Events, ) + + def __init__(self, c, **kwargs): + self.is_green = c.pool is not None and c.pool.is_green + self.box = (pidbox.gPidbox if self.is_green else pidbox.Pidbox)(c) + self.start = self.box.start + self.stop = self.box.stop + self.shutdown = self.box.shutdown + + def include_if(self, c): + return c.app.conf.CELERY_ENABLE_REMOTE_CONTROL + + +class Tasks(bootsteps.StartStopStep): + requires = (Events, ) + + def __init__(self, c, **kwargs): + c.task_consumer = c.qos = None + + def start(self, c): + c.update_strategies() + c.task_consumer = c.app.amqp.TaskConsumer( + c.connection, on_decode_error=c.on_decode_error, + ) + c.qos = QoS(c.task_consumer.qos, c.initial_prefetch_count) + c.qos.update() # set initial prefetch count + + def stop(self, c): + if c.task_consumer: + debug('Cancelling task consumer...') + ignore_errors(c, c.task_consumer.cancel) + + def shutdown(self, c): + if c.task_consumer: + self.stop(c) + debug('Closing consumer channel...') + ignore_errors(c, c.task_consumer.close) + c.task_consumer = None + + def info(self, c): + return {'prefetch_count': c.qos.value} + + +class Agent(bootsteps.StartStopStep): + conditional = True + requires = (Connection, ) + + def __init__(self, c, **kwargs): + self.agent_cls = self.enabled = c.app.conf.CELERYD_AGENT + + def create(self, c): + agent = c.agent = self.instantiate(self.agent_cls, c.connection) + return agent + + +class Gossip(bootsteps.ConsumerStep): + label = 'Gossip' + requires = (Events, ) + _cons_stamp_fields = itemgetter( + 'id', 'clock', 'hostname', 'pid', 'topic', 'action', 'cver', + ) + + def __init__(self, c, without_gossip=False, interval=5.0, **kwargs): + self.enabled = not without_gossip + self.app = c.app + c.gossip = self + self.Receiver = c.app.events.Receiver + self.hostname = c.hostname + self.full_hostname = '.'.join([self.hostname, str(c.pid)]) + + self.timer = c.timer + self.state = c.app.events.State() + self.interval = interval + self._tref = None + self.consensus_requests = defaultdict(list) + self.consensus_replies = {} + self.update_state = self.state.worker_event + self.event_handlers = { + 'worker.elect': self.on_elect, + 'worker.elect.ack': self.on_elect_ack, + } + self.clock = c.app.clock + + self.election_handlers = { + 'task': self.call_task + } + + def election(self, id, topic, action=None): + self.consensus_replies[id] = [] + self.dispatcher.send( + 'worker-elect', + id=id, topic=topic, action=action, cver=1, + ) + + def call_task(self, task): + try: + signature(task, app=self.app).apply_async() + except Exception as 
exc: + error('Could not call task: %r', exc, exc_info=1) + + def on_elect(self, event): + try: + (id_, clock, hostname, pid, + topic, action, _) = self._cons_stamp_fields(event) + except KeyError as exc: + return error('election request missing field %s', exc, exc_info=1) + heappush( + self.consensus_requests[id_], + (clock, '%s.%s' % (hostname, pid), topic, action), + ) + self.dispatcher.send('worker-elect-ack', id=id_) + + def start(self, c): + super(Gossip, self).start(c) + self.dispatcher = c.event_dispatcher + + def on_elect_ack(self, event): + id = event['id'] + try: + replies = self.consensus_replies[id] + except KeyError: + return # not for us + alive_workers = self.state.alive_workers() + replies.append(event['hostname']) + + if len(replies) >= len(alive_workers): + _, leader, topic, action = self.clock.sort_heap( + self.consensus_requests[id], + ) + if leader == self.full_hostname: + info('I won the election %r', id) + try: + handler = self.election_handlers[topic] + except KeyError: + error('Unknown election topic %r', topic, exc_info=1) + else: + handler(action) + else: + info('node %s elected for %r', leader, id) + self.consensus_requests.pop(id, None) + self.consensus_replies.pop(id, None) + + def on_node_join(self, worker): + info('%s joined the party', worker.hostname) + + def on_node_leave(self, worker): + info('%s left', worker.hostname) + + def on_node_lost(self, worker): + warn('%s went missing!', worker.hostname) + + def register_timer(self): + if self._tref is not None: + self._tref.cancel() + self._tref = self.timer.call_repeatedly(self.interval, self.periodic) + + def periodic(self): + workers = self.state.workers + dirty = set() + for worker in values(workers): + if not worker.alive: + dirty.add(worker) + self.on_node_lost(worker) + for worker in dirty: + workers.pop(worker.hostname, None) + + def get_consumers(self, channel): + self.register_timer() + ev = self.Receiver(channel, routing_key='worker.#') + return [kombu.Consumer( + channel, + queues=[ev.queue], + on_message=partial(self.on_message, ev.event_from_message), + no_ack=True + )] + + def on_message(self, prepare, message): + _type = message.delivery_info['routing_key'] + try: + handler = self.event_handlers[_type] + except KeyError: + pass + else: + return handler(message.payload) + + hostname = (message.headers.get('hostname') or + message.payload['hostname']) + if hostname != self.hostname: + type, event = prepare(message.payload) + group, _, subject = type.partition('-') + worker, created = self.update_state(subject, event) + if subject == 'offline': + try: + self.on_node_leave(worker) + finally: + self.state.workers.pop(worker.hostname, None) + elif created or subject == 'online': + self.on_node_join(worker) + else: + self.clock.forward() + + +class Mingle(bootsteps.StartStopStep): + label = 'Mingle' + requires = (Gossip, ) + + def __init__(self, c, without_mingle=False, **kwargs): + self.enabled = not without_mingle + + def start(self, c): + info('mingle: searching for neighbors') + I = c.app.control.inspect(timeout=1.0, connection=c.connection) + replies = I.hello(c.hostname, revoked._data) or {} + replies.pop(c.hostname, None) + if replies: + info('mingle: hello %s! 
sync with me', + ', '.join(reply for reply, value in items(replies) if value)) + for reply in values(replies): + if reply: + try: + other_clock, other_revoked = MINGLE_GET_FIELDS(reply) + except KeyError: # reply from pre-3.1 worker + pass + else: + c.app.clock.adjust(other_clock) + revoked.update(other_revoked) + else: + info('mingle: all alone') + + +class Evloop(bootsteps.StartStopStep): + label = 'event loop' + last = True + + def start(self, c): + self.patch_all(c) + c.loop(*c.loop_args()) + + def patch_all(self, c): + c.qos._mutex = DummyLock() diff --git a/awx/lib/site-packages/celery/worker/control.py b/awx/lib/site-packages/celery/worker/control.py index 4ad87e968b..c69254b79e 100644 --- a/awx/lib/site-packages/celery/worker/control.py +++ b/awx/lib/site-packages/celery/worker/control.py @@ -8,22 +8,22 @@ """ from __future__ import absolute_import -import logging -import os - -from datetime import datetime +import tempfile from kombu.utils.encoding import safe_repr +from celery.five import UserDict, items, StringIO from celery.platforms import signals as _signals from celery.utils import timeutils -from celery.utils.compat import UserDict +from celery.utils.functional import maybe_list from celery.utils.log import get_logger from celery.utils import jsonify -from . import state +from . import state as worker_state from .state import revoked +from .job import Request +__all__ = ['Panel'] DEFAULT_TASK_INFO_ITEMS = ('exchange', 'routing_key', 'rate_limit') logger = get_logger(__name__) @@ -37,61 +37,104 @@ class Panel(UserDict): return method +def _find_requests_by_id(ids, requests): + found, total = 0, len(ids) + for request in requests: + if request.id in ids: + yield request + found += 1 + if found >= total: + break + + @Panel.register -def revoke(panel, task_id, terminate=False, signal=None, **kwargs): +def query_task(state, ids, **kwargs): + ids = maybe_list(ids) + + def reqinfo(state, req): + return state, req.info() + + reqs = dict((req.id, ('reserved', req.info())) + for req in _find_requests_by_id( + ids, worker_state.reserved_requests)) + reqs.update(dict( + (req.id, ('active', req.info())) + for req in _find_requests_by_id( + ids, worker_state.active_requests, + ) + )) + + return reqs + + +@Panel.register +def revoke(state, task_id, terminate=False, signal=None, **kwargs): """Revoke task by task id.""" - revoked.add(task_id) + # supports list argument since 3.1 + task_ids, task_id = set(maybe_list(task_id) or []), None + size = len(task_ids) + terminated = set() + + revoked.update(task_ids) if terminate: signum = _signals.signum(signal or 'TERM') - for request in state.reserved_requests: - if request.id == task_id: - logger.info('Terminating %s (%s)', task_id, signum) - request.terminate(panel.consumer.pool, signal=signum) - break - else: - return {'ok': 'terminate: task %s not found' % (task_id, )} - return {'ok': 'terminating %s (%s)' % (task_id, signal)} + # reserved_requests changes size during iteration + # so need to consume the items first, then terminate after. 
+ requests = set(_find_requests_by_id( + task_ids, + worker_state.reserved_requests, + )) + for request in requests: + if request.id not in terminated: + terminated.add(request.id) + logger.info('Terminating %s (%s)', request.id, signum) + request.terminate(state.consumer.pool, signal=signum) + if len(terminated) >= size: + break - logger.info('Revoking task %s', task_id) - return {'ok': 'revoking task %s' % (task_id, )} + if not terminated: + return {'ok': 'terminate: tasks unknown'} + return {'ok': 'terminate: {0}'.format(', '.join(terminated))} + + idstr = ', '.join(task_ids) + logger.info('Tasks flagged as revoked: %s', idstr) + return {'ok': 'tasks {0} flagged as revoked'.format(idstr)} @Panel.register -def report(panel): - return {'ok': panel.app.bugreport()} +def report(state): + return {'ok': state.app.bugreport()} @Panel.register -def enable_events(panel): - dispatcher = panel.consumer.event_dispatcher - if not dispatcher.enabled: - dispatcher.enable() - dispatcher.send('worker-online') - logger.info('Events enabled by remote.') - return {'ok': 'events enabled'} - return {'ok': 'events already enabled'} +def enable_events(state): + dispatcher = state.consumer.event_dispatcher + if 'task' not in dispatcher.groups: + dispatcher.groups.add('task') + logger.info('Events of group {task} enabled by remote.') + return {'ok': 'task events enabled'} + return {'ok': 'task events already enabled'} @Panel.register -def disable_events(panel): - dispatcher = panel.consumer.event_dispatcher - if dispatcher.enabled: - dispatcher.send('worker-offline') - dispatcher.disable() - logger.info('Events disabled by remote.') - return {'ok': 'events disabled'} - return {'ok': 'events already disabled'} +def disable_events(state): + dispatcher = state.consumer.event_dispatcher + if 'task' in dispatcher.groups: + dispatcher.groups.discard('task') + logger.info('Events of group {task} disabled by remote.') + return {'ok': 'task events disabled'} + return {'ok': 'task events already disabled'} @Panel.register -def heartbeat(panel): +def heartbeat(state): logger.debug('Heartbeat requested by remote.') - dispatcher = panel.consumer.event_dispatcher - dispatcher.send('worker-heartbeat', freq=5, **state.SOFTWARE_INFO) + dispatcher = state.consumer.event_dispatcher + dispatcher.send('worker-heartbeat', freq=5, **worker_state.SOFTWARE_INFO) @Panel.register -def rate_limit(panel, task_name, rate_limit, **kwargs): +def rate_limit(state, task_name, rate_limit, **kwargs): """Set new rate limit for a task type. See :attr:`celery.task.base.Task.rate_limit`. 
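[Aside, not part of the patch itself:] The rewritten revoke command above accepts either a single task id or a list of ids (the path the diff flags as "supports list argument since 3.1"), and it materializes the matching requests into a set before terminating any of them, because reserved_requests changes size during iteration. A minimal self-contained sketch of the _find_requests_by_id generator it relies on; _Req is a hypothetical stub standing in for celery.worker.job.Request, where only the .id attribute matters:

    # Sketch of the _find_requests_by_id helper added above; _Req is a
    # hypothetical stand-in for Request with only an .id attribute.
    class _Req(object):
        def __init__(self, id):
            self.id = id

    def _find_requests_by_id(ids, requests):
        # Stop scanning as soon as every requested id has been yielded.
        found, total = 0, len(ids)
        for request in requests:
            if request.id in ids:
                yield request
                found += 1
                if found >= total:
                    break

    requests = [_Req('a'), _Req('b'), _Req('c'), _Req('d')]
    print([r.id for r in _find_requests_by_id({'b', 'c'}, requests)])
    # -> ['b', 'c']; 'd' is never inspected because the scan short-circuits.

revoke() consumes the generator into a set first precisely so that calling request.terminate() cannot mutate worker_state.reserved_requests while the generator is still iterating over it.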
@@ -103,21 +146,17 @@ def rate_limit(panel, task_name, rate_limit, **kwargs): try: timeutils.rate(rate_limit) - except ValueError, exc: - return {'error': 'Invalid rate limit string: %s' % exc} + except ValueError as exc: + return {'error': 'Invalid rate limit string: {0!r}'.format(exc)} try: - panel.app.tasks[task_name].rate_limit = rate_limit + state.app.tasks[task_name].rate_limit = rate_limit except KeyError: logger.error('Rate limit attempt for unknown task %s', task_name, exc_info=True) return {'error': 'unknown task'} - if not hasattr(panel.consumer.ready_queue, 'refresh'): - logger.error('Rate limit attempt, but rate limits disabled.') - return {'error': 'rate limits disabled'} - - panel.consumer.ready_queue.refresh() + state.consumer.reset_rate_limits() if not rate_limit: logger.info('Rate limits disabled for tasks of type %s', task_name) @@ -129,9 +168,9 @@ def rate_limit(panel, task_name, rate_limit, **kwargs): @Panel.register -def time_limit(panel, task_name=None, hard=None, soft=None, **kwargs): +def time_limit(state, task_name=None, hard=None, soft=None, **kwargs): try: - task = panel.app.tasks[task_name] + task = state.app.tasks[task_name] except KeyError: logger.error('Change time limit attempt for unknown task %s', task_name, exc_info=True) @@ -146,157 +185,191 @@ def time_limit(panel, task_name=None, hard=None, soft=None, **kwargs): @Panel.register -def dump_schedule(panel, safe=False, **kwargs): - from celery.worker.job import Request - schedule = panel.consumer.timer.schedule - if not schedule.queue: - logger.debug('--Empty schedule--') - return [] +def dump_schedule(state, safe=False, **kwargs): - formatitem = lambda i, item: '%s. %s pri%s %r' % ( - i, datetime.utcfromtimestamp(item['eta']), - item['priority'], item['item'], - ) - if logger.isEnabledFor(logging.DEBUG): - logger.debug('* Dump of current schedule:\n%s', '\n'.join( - formatitem(i, item) for i, item in enumerate(schedule.info()) - )) - scheduled_tasks = [] - for info in schedule.info(): - item = info['item'] - if item.args and isinstance(item.args[0], Request): - scheduled_tasks.append({ - 'eta': info['eta'], - 'priority': info['priority'], - 'request': item.args[0].info(safe=safe), - }) - return scheduled_tasks + def prepare_entries(): + for waiting in state.consumer.timer.schedule.queue: + try: + arg0 = waiting.entry.args[0] + except (IndexError, TypeError): + continue + else: + if isinstance(arg0, Request): + yield {'eta': arg0.eta.isoformat() if arg0.eta else None, + 'priority': waiting.priority, + 'request': arg0.info(safe=safe)} + return list(prepare_entries()) @Panel.register -def dump_reserved(panel, safe=False, **kwargs): - reserved = state.reserved_requests - state.active_requests +def dump_reserved(state, safe=False, **kwargs): + reserved = worker_state.reserved_requests - worker_state.active_requests if not reserved: - logger.debug('--Empty queue--') return [] - if logger.isEnabledFor(logging.DEBUG): - logger.debug('* Dump of currently reserved tasks:\n%s', - '\n'.join(safe_repr(r) for r in reserved)) return [request.info(safe=safe) for request in reserved] @Panel.register -def dump_active(panel, safe=False, **kwargs): - return [request.info(safe=safe) for request in state.active_requests] +def dump_active(state, safe=False, **kwargs): + return [request.info(safe=safe) + for request in worker_state.active_requests] @Panel.register -def stats(panel, **kwargs): - asinfo = {} - if panel.consumer.controller.autoscaler: - asinfo = panel.consumer.controller.autoscaler.info() - return {'total': 
state.total_count, - 'consumer': panel.consumer.info, - 'pool': panel.consumer.pool.info, - 'autoscaler': asinfo, - 'pid': os.getpid()} +def stats(state, **kwargs): + return state.consumer.controller.stats() @Panel.register -def dump_revoked(panel, **kwargs): - return list(state.revoked) +def objgraph(state, num=200, max_depth=10, type='Request'): # pragma: no cover + try: + import objgraph + except ImportError: + raise ImportError('Requires the objgraph library') + print('Dumping graph for type %r' % (type, )) + with tempfile.NamedTemporaryFile(prefix='cobjg', + suffix='.png', delete=False) as fh: + objects = objgraph.by_type(type)[:num] + objgraph.show_backrefs( + objects, + max_depth=max_depth, highlight=lambda v: v in objects, + filename=fh.name, + ) + return {'filename': fh.name} @Panel.register -def dump_tasks(panel, taskinfoitems=None, **kwargs): - tasks = panel.app.tasks +def memsample(state, **kwargs): # pragma: no cover + from celery.utils.debug import sample_mem + return sample_mem() + + +@Panel.register +def memdump(state, samples=10, **kwargs): # pragma: no cover + from celery.utils.debug import memdump + out = StringIO() + memdump(file=out) + return out.getvalue() + + +@Panel.register +def clock(state, **kwargs): + return {'clock': state.app.clock.value} + + +@Panel.register +def dump_revoked(state, **kwargs): + return list(worker_state.revoked) + + +@Panel.register +def hello(state, from_node, revoked=None, **kwargs): + if from_node != state.hostname: + logger.info('sync with %s', from_node) + if revoked: + worker_state.revoked.update(revoked) + return {'revoked': worker_state.revoked._data, + 'clock': state.app.clock.forward()} + + +@Panel.register +def dump_tasks(state, taskinfoitems=None, **kwargs): + tasks = state.app.tasks taskinfoitems = taskinfoitems or DEFAULT_TASK_INFO_ITEMS def _extract_info(task): - fields = dict( - (field, str(getattr(task, field, None))) - for field in taskinfoitems - if getattr(task, field, None) is not None) - info = ['='.join(f) for f in fields.items()] - if not info: - return task.name - return '%s [%s]' % (task.name, ' '.join(info)) + fields = dict((field, str(getattr(task, field, None))) + for field in taskinfoitems + if getattr(task, field, None) is not None) + if fields: + info = ['='.join(f) for f in items(fields)] + return '{0} [{1}]'.format(task.name, ' '.join(info)) + return task.name - info = [_extract_info(tasks[task]) for task in sorted(tasks)] - if logger.isEnabledFor(logging.DEBUG): - logger.debug('* Dump of currently registered tasks:\n%s', - '\n'.join(info)) - return info + return [_extract_info(tasks[task]) for task in sorted(tasks)] @Panel.register -def ping(panel, **kwargs): - return 'pong' +def ping(state, **kwargs): + return {'ok': 'pong'} @Panel.register -def pool_grow(panel, n=1, **kwargs): - if panel.consumer.controller.autoscaler: - panel.consumer.controller.autoscaler.force_scale_up(n) +def pool_grow(state, n=1, **kwargs): + if state.consumer.controller.autoscaler: + state.consumer.controller.autoscaler.force_scale_up(n) else: - panel.consumer.pool.grow(n) - return {'ok': 'spawned worker processes'} + state.consumer.pool.grow(n) + state.consumer._update_prefetch_count(n) + return {'ok': 'pool will grow'} @Panel.register -def pool_shrink(panel, n=1, **kwargs): - if panel.consumer.controller.autoscaler: - panel.consumer.controller.autoscaler.force_scale_down(n) +def pool_shrink(state, n=1, **kwargs): + if state.consumer.controller.autoscaler: + state.consumer.controller.autoscaler.force_scale_down(n) else: - 
panel.consumer.pool.shrink(n) - return {'ok': 'terminated worker processes'} + state.consumer.pool.shrink(n) + state.consumer._update_prefetch_count(-n) + return {'ok': 'pool will shrink'} @Panel.register -def pool_restart(panel, modules=None, reload=False, reloader=None, **kwargs): - if panel.app.conf.CELERYD_POOL_RESTARTS: - panel.consumer.controller.reload(modules, reload, reloader=reloader) +def pool_restart(state, modules=None, reload=False, reloader=None, **kwargs): + if state.app.conf.CELERYD_POOL_RESTARTS: + state.consumer.controller.reload(modules, reload, reloader=reloader) return {'ok': 'reload started'} else: raise ValueError('Pool restarts not enabled') @Panel.register -def autoscale(panel, max=None, min=None): - autoscaler = panel.consumer.controller.autoscaler +def autoscale(state, max=None, min=None): + autoscaler = state.consumer.controller.autoscaler if autoscaler: max_, min_ = autoscaler.update(max, min) - return {'ok': 'autoscale now min=%r max=%r' % (max_, min_)} + return {'ok': 'autoscale now min={0} max={1}'.format(max_, min_)} raise ValueError('Autoscale not enabled') @Panel.register -def shutdown(panel, msg='Got shutdown from remote', **kwargs): +def shutdown(state, msg='Got shutdown from remote', **kwargs): logger.warning(msg) raise SystemExit(msg) @Panel.register -def add_consumer(panel, queue, exchange=None, exchange_type=None, +def add_consumer(state, queue, exchange=None, exchange_type=None, routing_key=None, **options): - panel.consumer.add_task_queue(queue, exchange, exchange_type, + state.consumer.add_task_queue(queue, exchange, exchange_type, routing_key, **options) - return {'ok': 'add consumer %r' % (queue, )} + return {'ok': 'add consumer {0}'.format(queue)} @Panel.register -def cancel_consumer(panel, queue=None, **_): - panel.consumer.cancel_task_queue(queue) - return {'ok': 'no longer consuming from %s' % (queue, )} +def cancel_consumer(state, queue=None, **_): + state.consumer.cancel_task_queue(queue) + return {'ok': 'no longer consuming from {0}'.format(queue)} @Panel.register -def active_queues(panel): - """Returns the queues associated with each worker.""" +def active_queues(state): + """Return information about the queues a worker consumes from.""" return [dict(queue.as_dict(recurse=True)) - for queue in panel.consumer.task_consumer.queues] + for queue in state.consumer.task_consumer.queues] + + +def _wanted_config_key(key): + return key.isupper() and not key.startswith('__') @Panel.register -def dump_conf(panel, **kwargs): - return jsonify(dict(panel.app.conf)) +def dump_conf(state, with_defaults=False, **kwargs): + return jsonify(state.app.conf.table(with_defaults=with_defaults), + keyfilter=_wanted_config_key, + unknown_type_filter=safe_repr) + + +@Panel.register +def election(state, id, topic, action=None, **kwargs): + state.consumer.gossip.election(id, topic, action) diff --git a/awx/lib/site-packages/celery/worker/heartbeat.py b/awx/lib/site-packages/celery/worker/heartbeat.py index d930f49be7..d945239b12 100644 --- a/awx/lib/site-packages/celery/worker/heartbeat.py +++ b/awx/lib/site-packages/celery/worker/heartbeat.py @@ -9,8 +9,13 @@ """ from __future__ import absolute_import +from celery.five import values +from celery.utils.sysinfo import load_average + from .state import SOFTWARE_INFO, active_requests, total_count +__all__ = ['Heart'] + class Heart(object): """Timer sending heartbeats at regular intervals. 
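[Aside, not part of the patch itself:] The hunk below moves Heart from the millisecond-based timer.apply_interval() to timer.call_repeatedly(), which takes seconds, and drops the default beat interval from 5.0s to 2.0s. A rough illustration of the call_repeatedly() contract the new code assumes — the real worker supplies its own timer implementation, so this threading.Timer version is only a sketch:

    # Illustrative sketch of a call_repeatedly()-style API; the worker's
    # actual timer is not implemented this way.
    import threading

    class _Tref(object):
        # Cancellable handle, mirroring the tref that Heart stores.
        def __init__(self):
            self._stopped = threading.Event()

        def cancel(self):
            self._stopped.set()

    def call_repeatedly(interval, fun, args=()):
        tref = _Tref()

        def _tick():
            if not tref._stopped.is_set():
                fun(*args)          # fire the callback ...
                t = threading.Timer(interval, _tick)
                t.daemon = True
                t.start()           # ... then reschedule the next tick

        t = threading.Timer(interval, _tick)
        t.daemon = True
        t.start()
        return tref

    def send(event):
        print('sending %s' % (event, ))

    tref = call_repeatedly(2.0, send, ('worker-heartbeat', ))
    # ... later, analogous to Heart.stop(): tref.cancel()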
@@ -25,25 +30,25 @@ class Heart(object): def __init__(self, timer, eventer, interval=None): self.timer = timer self.eventer = eventer - self.interval = float(interval or 5.0) + self.interval = float(interval or 2.0) self.tref = None - # Make event dispatcher start/stop us when it's - # enabled/disabled. + # Make event dispatcher start/stop us when enabled/disabled. self.eventer.on_enabled.add(self.start) self.eventer.on_disabled.add(self.stop) def _send(self, event): return self.eventer.send(event, freq=self.interval, active=len(active_requests), - processed=sum(total_count.itervalues()), + processed=sum(values(total_count)), + loadavg=load_average(), **SOFTWARE_INFO) def start(self): if self.eventer.enabled: self._send('worker-online') - self.tref = self.timer.apply_interval( - self.interval * 1000.0, self._send, ('worker-heartbeat', ), + self.tref = self.timer.call_repeatedly( + self.interval, self._send, ('worker-heartbeat', ), ) def stop(self): diff --git a/awx/lib/site-packages/celery/worker/hub.py b/awx/lib/site-packages/celery/worker/hub.py deleted file mode 100644 index 7c1c17a483..0000000000 --- a/awx/lib/site-packages/celery/worker/hub.py +++ /dev/null @@ -1,222 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.hub - ~~~~~~~~~~~~~~~~~ - - Event-loop implementation. - -""" -from __future__ import absolute_import - -from kombu.utils import cached_property -from kombu.utils import eventio - -from celery.utils.log import get_logger -from celery.utils.timer2 import Schedule - -logger = get_logger(__name__) -READ, WRITE, ERR = eventio.READ, eventio.WRITE, eventio.ERR - - -class BoundedSemaphore(object): - """Asynchronous Bounded Semaphore. - - Bounded means that the value will stay within the specified - range even if it is released more times than it was acquired. - - This type is *not thread safe*. - - Example: - - >>> x = BoundedSemaphore(2) - - >>> def callback(i): - ... print('HELLO %r' % i) - - >>> x.acquire(callback, 1) - HELLO 1 - - >>> x.acquire(callback, 2) - HELLO 2 - - >>> x.acquire(callback, 3) - >>> x._waiters # private, do not access directly - [(callback, 3)] - - >>> x.release() - HELLO 3 - - """ - - def __init__(self, value): - self.initial_value = self.value = value - self._waiting = [] - - def acquire(self, callback, *partial_args): - """Acquire semaphore, applying ``callback`` when - the semaphore is ready. - - :param callback: The callback to apply. - :param \*partial_args: partial arguments to callback. - - """ - if self.value <= 0: - self._waiting.append((callback, partial_args)) - return False - else: - self.value = max(self.value - 1, 0) - callback(*partial_args) - return True - - def release(self): - """Release semaphore. - - This will apply any waiting callbacks from previous - calls to :meth:`acquire` done when the semaphore was busy. - - """ - self.value = min(self.value + 1, self.initial_value) - if self._waiting: - waiter, args = self._waiting.pop() - waiter(*args) - - def grow(self, n=1): - """Change the size of the semaphore to hold more values.""" - self.initial_value += n - self.value += n - [self.release() for _ in xrange(n)] - - def shrink(self, n=1): - """Change the size of the semaphore to hold less values.""" - self.initial_value = max(self.initial_value - n, 0) - self.value = max(self.value - n, 0) - - def clear(self): - """Reset the sempahore, including wiping out any waiting callbacks.""" - self._waiting[:] = [] - self.value = self.initial_value - - -class Hub(object): - """Event loop object. 
- - :keyword timer: Specify custom :class:`~celery.utils.timer2.Schedule`. - - """ - #: Flag set if reading from an fd will not block. - READ = READ - - #: Flag set if writing to an fd will not block. - WRITE = WRITE - - #: Flag set on error, and the fd should be read from asap. - ERR = ERR - - #: List of callbacks to be called when the loop is initialized, - #: applied with the hub instance as sole argument. - on_init = None - - #: List of callbacks to be called when the loop is exiting, - #: applied with the hub instance as sole argument. - on_close = None - - #: List of callbacks to be called when a task is received. - #: Takes no arguments. - on_task = None - - def __init__(self, timer=None): - self.timer = Schedule() if timer is None else timer - - self.readers = {} - self.writers = {} - self.on_init = [] - self.on_close = [] - self.on_task = [] - - def start(self): - """Called by StartStopComponent at worker startup.""" - self.poller = eventio.poll() - - def stop(self): - """Called by StartStopComponent at worker shutdown.""" - self.poller.close() - - def init(self): - for callback in self.on_init: - callback(self) - - def fire_timers(self, min_delay=1, max_delay=10, max_timers=10, - propagate=()): - delay = None - if self.timer._queue: - for i in range(max_timers): - delay, entry = self.scheduler.next() - if entry is None: - break - try: - entry() - except propagate: - raise - except Exception, exc: - logger.error('Error in timer: %r', exc, exc_info=1) - return min(max(delay or 0, min_delay), max_delay) - - def add(self, fd, callback, flags): - self.poller.register(fd, flags) - if not isinstance(fd, int): - fd = fd.fileno() - if flags & READ: - self.readers[fd] = callback - if flags & WRITE: - self.writers[fd] = callback - - def add_reader(self, fd, callback): - return self.add(fd, callback, READ | ERR) - - def add_writer(self, fd, callback): - return self.add(fd, callback, WRITE) - - def update_readers(self, readers): - [self.add_reader(*x) for x in readers.iteritems()] - - def update_writers(self, writers): - [self.add_writer(*x) for x in writers.iteritems()] - - def _unregister(self, fd): - try: - self.poller.unregister(fd) - except (KeyError, OSError): - pass - - def remove(self, fd): - fileno = fd.fileno() if not isinstance(fd, int) else fd - self.readers.pop(fileno, None) - self.writers.pop(fileno, None) - self._unregister(fd) - - def __enter__(self): - self.init() - return self - - def close(self, *args): - [self._unregister(fd) for fd in self.readers] - self.readers.clear() - [self._unregister(fd) for fd in self.writers] - self.writers.clear() - for callback in self.on_close: - callback(self) - __exit__ = close - - @cached_property - def scheduler(self): - return iter(self.timer) - - -class DummyLock(object): - """Pretending to be a lock.""" - - def __enter__(self): - return self - - def __exit__(self, *exc_info): - pass diff --git a/awx/lib/site-packages/celery/worker/job.py b/awx/lib/site-packages/celery/worker/job.py index 3bfe45e983..f324ab3359 100644 --- a/awx/lib/site-packages/celery/worker/job.py +++ b/awx/lib/site-packages/celery/worker/job.py @@ -7,31 +7,28 @@ which specifies how tasks are executed. 
""" -from __future__ import absolute_import +from __future__ import absolute_import, unicode_literals import logging -import time import socket import sys +from billiard.einfo import ExceptionInfo from datetime import datetime +from weakref import ref from kombu.utils import kwdict, reprcall from kombu.utils.encoding import safe_repr, safe_str from celery import signals -from celery.app import app_or_default -from celery.datastructures import ExceptionInfo +from celery.app.trace import trace_task, trace_task_ret from celery.exceptions import ( Ignore, TaskRevokedError, InvalidTaskError, SoftTimeLimitExceeded, TimeLimitExceeded, - WorkerLostError, Terminated, RetryTaskError, + WorkerLostError, Terminated, Retry, Reject, ) +from celery.five import items, monotonic, string_t from celery.platforms import signals as _signals -from celery.task.trace import ( - trace_task, - trace_task_ret, -) from celery.utils import fun_takes_kwargs from celery.utils.functional import noop from celery.utils.log import get_logger @@ -41,6 +38,8 @@ from celery.utils.timeutils import maybe_iso8601, timezone, maybe_make_aware from . import state +__all__ = ['Request'] + IS_PYPY = hasattr(sys, 'pypy_version_info') logger = get_logger(__name__) @@ -51,6 +50,7 @@ _does_debug = False def __optimize__(): + # this is also called by celery.app.trace.setup_worker_optimizations global _does_debug global _does_info _does_debug = logger.isEnabledFor(logging.DEBUG) @@ -68,17 +68,30 @@ revoked_tasks = state.revoked NEEDS_KWDICT = sys.version_info <= (2, 6) +#: Use when no message object passed to :class:`Request`. +DEFAULT_FIELDS = { + 'headers': None, + 'reply_to': None, + 'correlation_id': None, + 'delivery_info': { + 'exchange': None, + 'routing_key': None, + 'priority': 0, + 'redelivered': False, + }, +} + class Request(object): """A request for task execution.""" - if not IS_PYPY: + if not IS_PYPY: # pragma: no cover __slots__ = ( - 'app', 'name', 'id', 'args', 'kwargs', 'on_ack', 'delivery_info', + 'app', 'name', 'id', 'args', 'kwargs', 'on_ack', 'hostname', 'eventer', 'connection_errors', 'task', 'eta', - 'expires', 'request_dict', 'acknowledged', + 'expires', 'request_dict', 'acknowledged', 'on_reject', 'utc', 'time_start', 'worker_pid', '_already_revoked', - '_terminate_on_ack', - '_tzlocal', '__weakref__', + '_terminate_on_ack', '_apply_result', + '_tzlocal', '__weakref__', '__dict__', ) #: Format string used to log task success. @@ -100,14 +113,18 @@ class Request(object): Task %(name)s[%(id)s] ignored """ + rejected_msg = """\ + Task %(name)s[%(id)s] %(exc)s + """ + #: Format string used to log task retry. 
retry_msg = """Task %(name)s[%(id)s] retry: %(exc)s""" def __init__(self, body, on_ack=noop, hostname=None, eventer=None, app=None, connection_errors=None, request_dict=None, - delivery_info=None, task=None, **opts): - self.app = app or app_or_default(app) + message=None, task=None, on_reject=noop, **opts): + self.app = app name = self.name = body['task'] self.id = body['id'] self.args = body.get('args', []) @@ -123,12 +140,14 @@ class Request(object): expires = body.get('expires') utc = self.utc = body.get('utc', False) self.on_ack = on_ack + self.on_reject = on_reject self.hostname = hostname or socket.gethostname() self.eventer = eventer self.connection_errors = connection_errors or () self.task = task or self.app.tasks[name] self.acknowledged = self._already_revoked = False self.time_start = self.worker_pid = self._terminate_on_ack = None + self._apply_result = None self._tzlocal = None # timezone means the message is timezone-aware, and the only timezone @@ -136,9 +155,9 @@ class Request(object): if eta is not None: try: self.eta = maybe_iso8601(eta) - except (AttributeError, ValueError), exc: + except (AttributeError, ValueError, TypeError) as exc: raise InvalidTaskError( - 'invalid eta value %r: %s' % (eta, exc, )) + 'invalid eta value {0!r}: {1}'.format(eta, exc)) if utc: self.eta = maybe_make_aware(self.eta, self.tzlocal) else: @@ -146,33 +165,36 @@ class Request(object): if expires is not None: try: self.expires = maybe_iso8601(expires) - except (AttributeError, ValueError), exc: + except (AttributeError, ValueError, TypeError) as exc: raise InvalidTaskError( - 'invalid expires value %r: %s' % (expires, exc, )) + 'invalid expires value {0!r}: {1}'.format(expires, exc)) if utc: self.expires = maybe_make_aware(self.expires, self.tzlocal) else: self.expires = None - delivery_info = {} if delivery_info is None else delivery_info - self.delivery_info = { - 'exchange': delivery_info.get('exchange'), - 'routing_key': delivery_info.get('routing_key'), - 'priority': delivery_info.get('priority'), - } + if message: + delivery_info = message.delivery_info or {} + properties = message.properties or {} + body.update({ + 'headers': message.headers, + 'reply_to': properties.get('reply_to'), + 'correlation_id': properties.get('correlation_id'), + 'delivery_info': { + 'exchange': delivery_info.get('exchange'), + 'routing_key': delivery_info.get('routing_key'), + 'priority': delivery_info.get('priority'), + 'redelivered': delivery_info.get('redelivered'), + } - # amqplib transport adds the channel here for some reason, so need - # to remove it. - self.delivery_info.pop('channel', None) + }) + else: + body.update(DEFAULT_FIELDS) self.request_dict = body - @classmethod - def from_message(cls, message, body, **kwargs): - # should be deprecated - return Request( - body, - delivery_info=getattr(message, 'delivery_info', None), **kwargs - ) + @property + def delivery_info(self): + return self.request_dict['delivery_info'] def extend_with_default_kwargs(self): """Extend the tasks keyword arguments with standard task arguments. @@ -196,13 +218,13 @@ class Request(object): 'delivery_info': self.delivery_info} fun = self.task.run supported_keys = fun_takes_kwargs(fun, default_kwargs) - extend_with = dict((key, val) for key, val in default_kwargs.items() + extend_with = dict((key, val) for key, val in items(default_kwargs) if key in supported_keys) kwargs.update(extend_with) return kwargs def execute_using_pool(self, pool, **kwargs): - """Like :meth:`execute`, but using a worker pool. 
+ """Used by the worker to send this task to the pool. :param pool: A :class:`celery.concurrency.base.TaskPool` instance. @@ -210,9 +232,10 @@ class Request(object): and ignored. """ + uuid = self.id task = self.task if self.revoked(): - raise TaskRevokedError(self.id) + raise TaskRevokedError(uuid) hostname = self.hostname kwargs = self.kwargs @@ -222,19 +245,26 @@ class Request(object): request.update({'hostname': hostname, 'is_eager': False, 'delivery_info': self.delivery_info, 'group': self.request_dict.get('taskset')}) - result = pool.apply_async(trace_task_ret, - args=(self.name, self.id, - self.args, kwargs, request), - accept_callback=self.on_accepted, - timeout_callback=self.on_timeout, - callback=self.on_success, - error_callback=self.on_failure, - soft_timeout=task.soft_time_limit, - timeout=task.time_limit) + timeout, soft_timeout = request.get('timelimit', (None, None)) + timeout = timeout or task.time_limit + soft_timeout = soft_timeout or task.soft_time_limit + result = pool.apply_async( + trace_task_ret, + args=(self.name, uuid, self.args, kwargs, request), + accept_callback=self.on_accepted, + timeout_callback=self.on_timeout, + callback=self.on_success, + error_callback=self.on_failure, + soft_timeout=soft_timeout, + timeout=timeout, + correlation_id=uuid, + ) + # cannot create weakref to None + self._apply_result = ref(result) if result is not None else result return result def execute(self, loglevel=None, logfile=None): - """Execute the task in a :func:`~celery.task.trace.trace_task`. + """Execute the task in a :func:`~celery.app.trace.trace_task`. :keyword loglevel: The loglevel used by the task. :keyword logfile: The logfile used by the task. @@ -269,23 +299,27 @@ class Request(object): return True def terminate(self, pool, signal=None): + signal = _signals.signum(signal or 'TERM') if self.time_start: - signal = _signals.signum(signal or 'TERM') pool.terminate_job(self.worker_pid, signal) self._announce_revoked('terminated', True, signal, False) else: self._terminate_on_ack = pool, signal + if self._apply_result is not None: + obj = self._apply_result() # is a weakref + if obj is not None: + obj.terminate(signal) def _announce_revoked(self, reason, terminated, signum, expired): task_ready(self) self.send_event('task-revoked', terminated=terminated, signum=signum, expired=expired) if self.store_errors: - self.task.backend.mark_as_revoked(self.id, reason) + self.task.backend.mark_as_revoked(self.id, reason, request=self) self.acknowledge() self._already_revoked = True - send_revoked(self.task, terminated=terminated, - signum=signum, expired=expired) + send_revoked(self.task, request=self, + terminated=terminated, signum=signum, expired=expired) def revoked(self): """If revoked, skip task and mark state.""" @@ -295,7 +329,7 @@ class Request(object): if self.expires: expired = self.maybe_expire() if self.id in revoked_tasks: - warn('Skipping revoked task: %s[%s]', self.name, self.id) + info('Discarding revoked task: %s[%s]', self.name, self.id) self._announce_revoked( 'expired' if expired else 'revoked', False, None, expired, ) @@ -313,7 +347,7 @@ class Request(object): task_accepted(self) if not self.task.acks_late: self.acknowledge() - self.send_event('task-started', pid=pid) + self.send_event('task-started') if _does_debug: debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid) if self._terminate_on_ack is not None: @@ -332,9 +366,9 @@ class Request(object): exc = TimeLimitExceeded(timeout) if self.store_errors: - self.task.backend.mark_as_failure(self.id, exc) 
+ self.task.backend.mark_as_failure(self.id, exc, request=self) - def on_success(self, ret_value, now=None): + def on_success(self, ret_value, now=None, nowfun=monotonic): """Handler called if the task was successfully processed.""" if isinstance(ret_value, ExceptionInfo): if isinstance(ret_value.exception, ( @@ -347,14 +381,14 @@ class Request(object): self.acknowledge() if self.eventer and self.eventer.enabled: - now = time.time() - runtime = self.time_start and (time.time() - self.time_start) or 0 + now = nowfun() + runtime = self.time_start and (now - self.time_start) or 0 self.send_event('task-succeeded', result=safe_repr(ret_value), runtime=runtime) if _does_info: - now = now or time.time() - runtime = self.time_start and (time.time() - self.time_start) or 0 + now = now or nowfun() + runtime = self.time_start and (now - self.time_start) or 0 info(self.success_msg.strip(), { 'id': self.id, 'name': self.name, 'return_value': self.repr_result(ret_value), @@ -370,33 +404,37 @@ class Request(object): traceback=safe_str(exc_info.traceback)) if _does_info: - info(self.retry_msg.strip(), { - 'id': self.id, 'name': self.name, - 'exc': exc_info.exception}) + info(self.retry_msg.strip(), + {'id': self.id, 'name': self.name, + 'exc': exc_info.exception}) def on_failure(self, exc_info): """Handler called if the task raised an exception.""" task_ready(self) + send_failed_event = True if not exc_info.internal: exc = exc_info.exception - if isinstance(exc, RetryTaskError): + if isinstance(exc, Retry): return self.on_retry(exc_info) # These are special cases where the process would not have had # time to write the result. if self.store_errors: if isinstance(exc, WorkerLostError): - self.task.backend.mark_as_failure(self.id, exc) + self.task.backend.mark_as_failure( + self.id, exc, request=self, + ) elif isinstance(exc, Terminated): self._announce_revoked('terminated', True, str(exc), False) + send_failed_event = False # already sent revoked event # (acks_late) acknowledge after result stored. 
if self.task.acks_late: self.acknowledge() - self._log_error(exc_info) + self._log_error(exc_info, send_failed_event=send_failed_event) - def _log_error(self, einfo): + def _log_error(self, einfo, send_failed_event=True): einfo.exception = get_pickled_exception(einfo.exception) exception, traceback, exc_info, internal, sargs, skwargs = ( safe_repr(einfo.exception), @@ -409,12 +447,21 @@ class Request(object): format = self.error_msg description = 'raised exception' severity = logging.ERROR - self.send_event( - 'task-failed', exception=exception, traceback=traceback, - ) + if send_failed_event: + self.send_event( + 'task-failed', exception=exception, traceback=traceback, + ) if internal: - if isinstance(einfo.exception, Ignore): + if isinstance(einfo.exception, MemoryError): + raise MemoryError('Process got: %s' % (einfo.exception, )) + elif isinstance(einfo.exception, Reject): + format = self.rejected_msg + description = 'rejected' + severity = logging.WARN + exc_info = einfo + self.reject(requeue=einfo.exception.requeue) + elif isinstance(einfo.exception, Ignore): format = self.ignored_msg description = 'ignored' severity = logging.INFO @@ -453,10 +500,17 @@ class Request(object): self.on_ack(logger, self.connection_errors) self.acknowledged = True + def reject(self, requeue=False): + if not self.acknowledged: + self.on_reject(logger, self.connection_errors, requeue) + self.acknowledged = True + def repr_result(self, result, maxlen=46): # 46 is the length needed to fit # 'the quick brown fox jumps over the lazy dog' :) - return truncate(safe_repr(result), maxlen) + if not isinstance(result, string_t): + result = safe_repr(result) + return truncate(result) if len(result) > maxlen else result def info(self, safe=False): return {'id': self.id, @@ -470,17 +524,15 @@ class Request(object): 'worker_pid': self.worker_pid} def __str__(self): - return '%s[%s]%s%s' % ( - self.name, self.id, - ' eta:[%s]' % (self.eta, ) if self.eta else '', - ' expires:[%s]' % (self.expires, ) if self.expires else '') + return '{0.name}[{0.id}]{1}{2}'.format(self, + ' eta:[{0}]'.format(self.eta) if self.eta else '', + ' expires:[{0}]'.format(self.expires) if self.expires else '') shortinfo = __str__ def __repr__(self): - return '<%s %s: %s>' % ( + return '<{0} {1}: {2}>'.format( type(self).__name__, self.id, - reprcall(self.name, self.args, self.kwargs), - ) + reprcall(self.name, self.args, self.kwargs)) @property def tzlocal(self): @@ -493,28 +545,30 @@ class Request(object): return (not self.task.ignore_result or self.task.store_errors_even_if_ignored) - def _compat_get_task_id(self): + @property + def task_id(self): + # XXX compat return self.id - def _compat_set_task_id(self, value): + @task_id.setter # noqa + def task_id(self, value): self.id = value - task_id = property(_compat_get_task_id, _compat_set_task_id) - def _compat_get_task_name(self): + @property + def task_name(self): + # XXX compat return self.name - def _compat_set_task_name(self, value): + @task_name.setter # noqa + def task_name(self, value): self.name = value - task_name = property(_compat_get_task_name, _compat_set_task_name) + @property + def reply_to(self): + # used by rpc backend when failures reported by parent process + return self.request_dict['reply_to'] -class TaskRequest(Request): - - def __init__(self, name, id, args=(), kwargs={}, - eta=None, expires=None, **options): - """Compatibility class.""" - - super(TaskRequest, self).__init__({ - 'task': name, 'id': id, 'args': args, - 'kwargs': kwargs, 'eta': eta, - 'expires': expires}, 
**options) + @property + def correlation_id(self): + # used similarly to reply_to + return self.request_dict['correlation_id'] diff --git a/awx/lib/site-packages/celery/worker/loops.py b/awx/lib/site-packages/celery/worker/loops.py new file mode 100644 index 0000000000..12842ffb1c --- /dev/null +++ b/awx/lib/site-packages/celery/worker/loops.py @@ -0,0 +1,104 @@ +""" +celery.worker.loop +~~~~~~~~~~~~~~~~~~ + +The consumers highly-optimized inner loop. + +""" +from __future__ import absolute_import + +import socket + +from celery.bootsteps import RUN +from celery.exceptions import SystemTerminate, WorkerLostError +from celery.utils.log import get_logger + +from . import state + +__all__ = ['asynloop', 'synloop'] + +logger = get_logger(__name__) +error = logger.error + + +def asynloop(obj, connection, consumer, blueprint, hub, qos, + heartbeat, clock, hbrate=2.0, RUN=RUN): + """Non-blocking event loop consuming messages until connection is lost, + or shutdown is requested.""" + + update_qos = qos.update + readers, writers = hub.readers, hub.writers + hbtick = connection.heartbeat_check + errors = connection.connection_errors + hub_add, hub_remove = hub.add, hub.remove + + on_task_received = obj.create_task_handler() + + if heartbeat and connection.supports_heartbeats: + hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate) + + consumer.callbacks = [on_task_received] + consumer.consume() + obj.on_ready() + obj.controller.register_with_event_loop(hub) + obj.register_with_event_loop(hub) + + # did_start_ok will verify that pool processes were able to start, + # but this will only work the first time we start, as + # maxtasksperchild will mess up metrics. + if not obj.restart_count and not obj.pool.did_start_ok(): + raise WorkerLostError('Could not start worker processes') + + # FIXME: Use loop.run_forever + # Tried and works, but no time to test properly before release. + hub.propagate_errors = errors + loop = hub.create_loop() + + try: + while blueprint.state == RUN and obj.connection: + # shutdown if signal handlers told us to. + if state.should_stop: + raise SystemExit() + elif state.should_terminate: + raise SystemTerminate() + + # We only update QoS when there is no more messages to read. + # This groups together qos calls, and makes sure that remote + # control commands will be prioritized over task messages. 
+ if qos.prev != qos.value: + update_qos() + + try: + next(loop) + except StopIteration: + loop = hub.create_loop() + finally: + try: + hub.close() + except Exception as exc: + error( + 'Error cleaning up after event loop: %r', exc, exc_info=1, + ) + + +def synloop(obj, connection, consumer, blueprint, hub, qos, + heartbeat, clock, hbrate=2.0, **kwargs): + """Fallback blocking event loop for transports that doesn't support AIO.""" + + on_task_received = obj.create_task_handler() + consumer.register_callback(on_task_received) + consumer.consume() + + obj.on_ready() + + while blueprint.state == RUN and obj.connection: + state.maybe_shutdown() + if qos.prev != qos.value: + qos.update() + try: + connection.drain_events(timeout=2.0) + except socket.timeout: + pass + except socket.error: + if blueprint.state == RUN: + raise diff --git a/awx/lib/site-packages/celery/worker/mediator.py b/awx/lib/site-packages/celery/worker/mediator.py deleted file mode 100644 index 0e10392797..0000000000 --- a/awx/lib/site-packages/celery/worker/mediator.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -""" - celery.worker.mediator - ~~~~~~~~~~~~~~~~~~~~~~ - - The mediator is an internal thread that moves tasks - from an internal :class:`Queue` to the worker pool. - - This is only used if rate limits are enabled, as it moves - messages from the rate limited queue (which holds tasks - that are allowed to be processed) to the pool. Disabling - rate limits will also disable this machinery, - and can improve performance. - -""" -from __future__ import absolute_import - -import logging - -from Queue import Empty - -from celery.app import app_or_default -from celery.utils.threads import bgThread -from celery.utils.log import get_logger - -from .bootsteps import StartStopComponent - -logger = get_logger(__name__) - - -class WorkerComponent(StartStopComponent): - name = 'worker.mediator' - requires = ('pool', 'queues', ) - - def __init__(self, w, **kwargs): - w.mediator = None - - def include_if(self, w): - return w.start_mediator - - def create(self, w): - m = w.mediator = self.instantiate(w.mediator_cls, w.ready_queue, - app=w.app, callback=w.process_task) - return m - - -class Mediator(bgThread): - """Mediator thread.""" - - #: The task queue, a :class:`~Queue.Queue` instance. - ready_queue = None - - #: Callback called when a task is obtained. 
- callback = None - - def __init__(self, ready_queue, callback, app=None, **kw): - self.app = app_or_default(app) - self.ready_queue = ready_queue - self.callback = callback - self._does_debug = logger.isEnabledFor(logging.DEBUG) - super(Mediator, self).__init__() - - def body(self): - try: - task = self.ready_queue.get(timeout=1.0) - except Empty: - return - - if self._does_debug: - logger.debug('Mediator: Running callback for task: %s[%s]', - task.name, task.id) - - try: - self.callback(task) - except Exception, exc: - logger.error('Mediator callback raised exception %r', - exc, exc_info=True, - extra={'data': {'id': task.id, - 'name': task.name, - 'hostname': task.hostname}}) diff --git a/awx/lib/site-packages/celery/worker/pidbox.py b/awx/lib/site-packages/celery/worker/pidbox.py new file mode 100644 index 0000000000..1165361e58 --- /dev/null +++ b/awx/lib/site-packages/celery/worker/pidbox.py @@ -0,0 +1,114 @@ +from __future__ import absolute_import + +import socket +import threading + +from kombu.common import ignore_errors +from kombu.utils.encoding import safe_str + +from celery.datastructures import AttributeDict +from celery.utils.log import get_logger + +from . import control + +__all__ = ['Pidbox', 'gPidbox'] + +logger = get_logger(__name__) +debug, error, info = logger.debug, logger.error, logger.info + + +class Pidbox(object): + consumer = None + + def __init__(self, c): + self.c = c + self.hostname = c.hostname + self.node = c.app.control.mailbox.Node( + safe_str(c.hostname), + handlers=control.Panel.data, + state=AttributeDict(app=c.app, hostname=c.hostname, consumer=c), + ) + self._forward_clock = self.c.app.clock.forward + + def on_message(self, body, message): + self._forward_clock() # just increase clock as clients usually don't + # have a valid clock to adjust with. 
+ try: + self.node.handle_message(body, message) + except KeyError as exc: + error('No such control command: %s', exc) + except Exception as exc: + error('Control command error: %r', exc, exc_info=True) + self.reset() + + def start(self, c): + self.node.channel = c.connection.channel() + self.consumer = self.node.listen(callback=self.on_message) + + def on_stop(self): + pass + + def stop(self, c): + self.on_stop() + self.consumer = self._close_channel(c) + + def reset(self): + """Sets up the process mailbox.""" + self.stop(self.c) + self.start(self.c) + + def _close_channel(self, c): + if self.node and self.node.channel: + ignore_errors(c, self.node.channel.close) + + def shutdown(self, c): + self.on_stop() + if self.consumer: + debug('Cancelling broadcast consumer...') + ignore_errors(c, self.consumer.cancel) + self.stop(self.c) + + +class gPidbox(Pidbox): + _node_shutdown = None + _node_stopped = None + _resets = 0 + + def start(self, c): + c.pool.spawn_n(self.loop, c) + + def on_stop(self): + if self._node_stopped: + self._node_shutdown.set() + debug('Waiting for broadcast thread to shutdown...') + self._node_stopped.wait() + self._node_stopped = self._node_shutdown = None + + def reset(self): + self._resets += 1 + + def _do_reset(self, c, connection): + self._close_channel(c) + self.node.channel = connection.channel() + self.consumer = self.node.listen(callback=self.on_message) + self.consumer.consume() + + def loop(self, c): + resets = [self._resets] + shutdown = self._node_shutdown = threading.Event() + stopped = self._node_stopped = threading.Event() + try: + with c.connect() as connection: + + info('pidbox: Connected to %s.', connection.as_uri()) + self._do_reset(c, connection) + while not shutdown.is_set() and c.connection: + if resets[0] < self._resets: + resets[0] += 1 + self._do_reset(c, connection) + try: + connection.drain_events(timeout=1.0) + except socket.timeout: + pass + finally: + stopped.set() diff --git a/awx/lib/site-packages/celery/worker/state.py b/awx/lib/site-packages/celery/worker/state.py index 10fe53b303..6642ecce53 100644 --- a/awx/lib/site-packages/celery/worker/state.py +++ b/awx/lib/site-packages/celery/worker/state.py @@ -15,14 +15,20 @@ import os import sys import platform import shelve +import zlib -from collections import defaultdict - -from kombu.serialization import pickle_protocol +from kombu.serialization import pickle, pickle_protocol from kombu.utils import cached_property from celery import __version__ from celery.datastructures import LimitedSet +from celery.exceptions import SystemTerminate +from celery.five import Counter + +__all__ = ['SOFTWARE_INFO', 'reserved_requests', 'active_requests', + 'total_count', 'revoked', 'task_reserved', 'maybe_shutdown', + 'task_accepted', 'task_ready', 'task_reserved', 'task_ready', + 'Persistent'] #: Worker software/platform information. SOFTWARE_INFO = {'sw_ident': 'py-celery', @@ -30,11 +36,11 @@ SOFTWARE_INFO = {'sw_ident': 'py-celery', 'sw_sys': platform.system()} #: maximum number of revokes to keep in memory. -REVOKES_MAX = 10000 +REVOKES_MAX = 50000 #: how many seconds a revoke will be active before #: being expired when the max limit has been exceeded. -REVOKE_EXPIRES = 3600 +REVOKE_EXPIRES = 10800 #: set of all reserved :class:`~celery.worker.job.Request`'s. reserved_requests = set() @@ -42,19 +48,26 @@ reserved_requests = set() #: set of currently active :class:`~celery.worker.job.Request`'s. active_requests = set() -#: count of tasks executed by the worker, sorted by type. 
-total_count = defaultdict(int) +#: count of tasks accepted by the worker, sorted by type. +total_count = Counter() #: the list of currently revoked tasks. Persistent if statedb set. revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES) -#: Updates global state when a task has been reserved. +#: Update global state when a task has been reserved. task_reserved = reserved_requests.add should_stop = False should_terminate = False +def maybe_shutdown(): + if should_stop: + raise SystemExit() + elif should_terminate: + raise SystemTerminate() + + def task_accepted(request): """Updates global state when a task has been accepted.""" active_requests.add(request) @@ -73,8 +86,8 @@ C_BENCH_EVERY = int(os.environ.get('C_BENCH_EVERY') or if C_BENCH: # pragma: no cover import atexit - from time import time from billiard import current_process + from celery.five import monotonic from celery.utils.debug import memdump, sample_mem all_count = 0 @@ -90,9 +103,10 @@ if C_BENCH: # pragma: no cover @atexit.register def on_shutdown(): if bench_first is not None and bench_last is not None: - print('- Time spent in benchmark: %r' % ( - bench_last - bench_first)) - print('- Avg: %s' % (sum(bench_sample) / len(bench_sample))) + print('- Time spent in benchmark: {0!r}'.format( + bench_last - bench_first)) + print('- Avg: {0}'.format( + sum(bench_sample) / len(bench_sample))) memdump() def task_reserved(request): # noqa @@ -100,7 +114,7 @@ if C_BENCH: # pragma: no cover global bench_first now = None if bench_start is None: - bench_start = now = time() + bench_start = now = monotonic() if bench_first is None: bench_first = now @@ -112,10 +126,10 @@ if C_BENCH: # pragma: no cover global bench_last all_count += 1 if not all_count % bench_every: - now = time() + now = monotonic() diff = now - bench_start - print('- Time spent processing %s tasks (since first ' - 'task received): ~%.4fs\n' % (bench_every, diff)) + print('- Time spent processing {0} tasks (since first ' + 'task received): ~{1:.4f}s\n'.format(bench_every, diff)) sys.stdout.flush() bench_start = bench_last = now bench_sample.append(diff) @@ -132,44 +146,87 @@ class Persistent(object): """ storage = shelve protocol = pickle_protocol + compress = zlib.compress + decompress = zlib.decompress _is_open = False - def __init__(self, filename): + def __init__(self, state, filename, clock=None): + self.state = state self.filename = filename - self._load() - - def save(self): - self.sync(self.db) - self.db.sync() - self.close() - - def merge(self, d): - saved = d.get('revoked') or LimitedSet() - if isinstance(saved, LimitedSet): - revoked.update(saved) - else: - # (pre 3.0.18) used to be stored as dict - for item in saved: - revoked.add(item) - return d - - def sync(self, d): - revoked.purge() - d['revoked'] = revoked - return d + self.clock = clock + self.merge() def open(self): return self.storage.open( self.filename, protocol=self.protocol, writeback=True, ) + def merge(self): + self._merge_with(self.db) + + def sync(self): + self._sync_with(self.db) + self.db.sync() + def close(self): if self._is_open: self.db.close() self._is_open = False - def _load(self): - self.merge(self.db) + def save(self): + self.sync() + self.close() + + def _merge_with(self, d): + self._merge_revoked(d) + self._merge_clock(d) + return d + + def _sync_with(self, d): + self._revoked_tasks.purge() + d.update( + __proto__=3, + zrevoked=self.compress(self._dumps(self._revoked_tasks)), + clock=self.clock.forward() if self.clock else 0, + ) + return d + + def _merge_clock(self, d): 
+ if self.clock: + d['clock'] = self.clock.adjust(d.get('clock') or 0) + + def _merge_revoked(self, d): + try: + self._merge_revoked_v3(d['zrevoked']) + except KeyError: + try: + self._merge_revoked_v2(d.pop('revoked')) + except KeyError: + pass + # purge expired items at boot + self._revoked_tasks.purge() + + def _merge_revoked_v3(self, zrevoked): + if zrevoked: + self._revoked_tasks.update(pickle.loads(self.decompress(zrevoked))) + + def _merge_revoked_v2(self, saved): + if not isinstance(saved, LimitedSet): + # (pre 3.0.18) used to be stored as a dict + return self._merge_revoked_v1(saved) + self._revoked_tasks.update(saved) + + def _merge_revoked_v1(self, saved): + add = self._revoked_tasks.add + for item in saved: + add(item) + + def _dumps(self, obj): + return pickle.dumps(obj, protocol=self.protocol) + + @property + def _revoked_tasks(self): + return self.state.revoked @cached_property def db(self): diff --git a/awx/lib/site-packages/celery/worker/strategy.py b/awx/lib/site-packages/celery/worker/strategy.py index 4e4b7cefd3..0b0d327c31 100644 --- a/awx/lib/site-packages/celery/worker/strategy.py +++ b/awx/lib/site-packages/celery/worker/strategy.py @@ -8,19 +8,82 @@ """ from __future__ import absolute_import +import logging + +from kombu.async.timer import to_timestamp +from kombu.utils.encoding import safe_repr + +from celery.utils.log import get_logger +from celery.utils.timeutils import timezone + from .job import Request +from .state import task_reserved + +__all__ = ['default'] + +logger = get_logger(__name__) -def default(task, app, consumer): +def default(task, app, consumer, + info=logger.info, error=logger.error, task_reserved=task_reserved, + to_system_tz=timezone.to_system): hostname = consumer.hostname eventer = consumer.event_dispatcher Req = Request - handle = consumer.on_task connection_errors = consumer.connection_errors + _does_info = logger.isEnabledFor(logging.INFO) + events = eventer and eventer.enabled + send_event = eventer.send + call_at = consumer.timer.call_at + apply_eta_task = consumer.apply_eta_task + rate_limits_enabled = not consumer.disable_rate_limits + bucket = consumer.task_buckets[task.name] + handle = consumer.on_task_request + limit_task = consumer._limit_task + + def task_message_handler(message, body, ack, reject, callbacks, + to_timestamp=to_timestamp): + req = Req(body, on_ack=ack, on_reject=reject, + app=app, hostname=hostname, + eventer=eventer, task=task, + connection_errors=connection_errors, + message=message) + if req.revoked(): + return + + if _does_info: + info('Received task: %s', req) + + if events: + send_event( + 'task-received', + uuid=req.id, name=req.name, + args=safe_repr(req.args), kwargs=safe_repr(req.kwargs), + retries=req.request_dict.get('retries', 0), + eta=req.eta and req.eta.isoformat(), + expires=req.expires and req.expires.isoformat(), + ) + + if req.eta: + try: + if req.utc: + eta = to_timestamp(to_system_tz(req.eta)) + else: + eta = to_timestamp(req.eta, timezone.local) + except OverflowError as exc: + error("Couldn't convert eta %s to timestamp: %r. 
Task: %r", + req.eta, exc, req.info(safe=True), exc_info=True) + req.acknowledge() + else: + consumer.qos.increment_eventually() + call_at(eta, apply_eta_task, (req, ), priority=6) + else: + if rate_limits_enabled: + if bucket: + return limit_task(req, bucket, 1) + task_reserved(req) + if callbacks: + [callback() for callback in callbacks] + handle(req) - def task_message_handler(message, body, ack): - handle(Req(body, on_ack=ack, app=app, hostname=hostname, - eventer=eventer, task=task, - connection_errors=connection_errors, - delivery_info=message.delivery_info)) return task_message_handler diff --git a/awx/lib/site-packages/dateutil/__init__.py b/awx/lib/site-packages/dateutil/__init__.py index 0f91a31f6e..1020e72919 100644 --- a/awx/lib/site-packages/dateutil/__init__.py +++ b/awx/lib/site-packages/dateutil/__init__.py @@ -7,4 +7,4 @@ datetime module. """ __author__ = "Tomi Pieviläinen " __license__ = "Simplified BSD" -__version__ = "2.1" +__version__ = "2.2" diff --git a/awx/lib/site-packages/dateutil/parser.py b/awx/lib/site-packages/dateutil/parser.py index a2604a35ba..aef836238c 100644 --- a/awx/lib/site-packages/dateutil/parser.py +++ b/awx/lib/site-packages/dateutil/parser.py @@ -174,7 +174,7 @@ class parserinfo(object): # m from a.m/p.m, t from ISO T separator JUMP = [" ", ".", ",", ";", "-", "/", "'", "at", "on", "and", "ad", "m", "t", "of", - "st", "nd", "rd", "th"] + "st", "nd", "rd", "th"] WEEKDAYS = [("Mon", "Monday"), ("Tue", "Tuesday"), @@ -305,7 +305,10 @@ class parser(object): if not default: default = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0) - res = self._parse(timestr, **kwargs) + + + res, skipped_tokens = self._parse(timestr, **kwargs) + if res is None: raise ValueError("unknown string format") repl = {} @@ -339,6 +342,10 @@ class parser(object): ret = ret.replace(tzinfo=tz.tzutc()) elif res.tzoffset: ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) + + if skipped_tokens: + return ret, skipped_tokens + return ret class _result(_resultbase): @@ -346,7 +353,10 @@ class parser(object): "hour", "minute", "second", "microsecond", "tzname", "tzoffset"] - def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False): + def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, fuzzy_with_tokens=False): + if fuzzy_with_tokens: + fuzzy = True + info = self.info if dayfirst is None: dayfirst = info.dayfirst @@ -354,6 +364,13 @@ class parser(object): yearfirst = info.yearfirst res = self._result() l = _timelex.split(timestr) + + + # keep up with the last token skipped so we can recombine + # consecutively skipped tokens (-2 for when i begins at 0). 
+ last_skipped_token_i = -2 + skipped_tokens = list() + + try: + # year/month/day list @@ -387,7 +404,7 @@ class parser(object): res.minute = int(s[2:]) elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6): # YYMMDD or HHMMSS[.ss] - s = l[i-1] + s = l[i-1] if not ymd and l[i-1].find('.') == -1: ymd.append(info.convertyear(int(s[:2]))) ymd.append(int(s[2:4])) @@ -636,6 +653,13 @@ class parser(object): if not (info.jump(l[i]) or fuzzy): return None + if last_skipped_token_i == i - 1: + # recombine the tokens + skipped_tokens[-1] += l[i] + else: + # just append + skipped_tokens.append(l[i]) + last_skipped_token_i = i i += 1 # Process year/month/day @@ -705,7 +729,11 @@ class parser(object): if not info.validate(res): return None - return res + + if fuzzy_with_tokens: + return res, tuple(skipped_tokens) + + return res, None DEFAULTPARSER = parser() def parse(timestr, parserinfo=None, **kwargs): @@ -888,7 +916,7 @@ class _tzparser(object): except (IndexError, ValueError, AssertionError): return None - + return res diff --git a/awx/lib/site-packages/dateutil/zoneinfo/__init__.py b/awx/lib/site-packages/dateutil/zoneinfo/__init__.py index a1b34874ba..81db1405b1 100644 --- a/awx/lib/site-packages/dateutil/zoneinfo/__init__.py +++ b/awx/lib/site-packages/dateutil/zoneinfo/__init__.py @@ -5,9 +5,12 @@ Copyright (c) 2003-2005 Gustavo Niemeyer This module offers extensions to the standard Python datetime module. """ -from dateutil.tz import tzfile -from tarfile import TarFile +import logging import os +from subprocess import call +from tarfile import TarFile + +from dateutil.tz import tzfile __author__ = "Tomi Pieviläinen " __license__ = "Simplified BSD" @@ -58,6 +61,11 @@ def gettz(name): return tzinfo def rebuild(filename, tag=None, format="gz"): + """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* + + filename is the timezone tarball from ftp.iana.org/tz. + + """ import tempfile, shutil tmpdir = tempfile.mkdtemp() zonedir = os.path.join(tmpdir, "zoneinfo") @@ -75,7 +83,18 @@ def rebuild(filename, tag=None, format="gz"): name == "leapseconds"): tf.extract(name, tmpdir) filepath = os.path.join(tmpdir, name) - os.system("zic -d %s %s" % (zonedir, filepath)) + try: + # zic will return errors for nontz files in the package + # such as the Makefile or README, so check_call cannot + # be used (or at least extra checks would be needed) + call(["zic", "-d", zonedir, filepath]) + except OSError as e: + if e.errno == 2: + logging.error( + "Could not find zic. Perhaps you need to install " + "libc-bin or some other package that provides it, " + "or it's not in your PATH?") + raise tf.close() target = os.path.join(moduledir, targetname) for entry in os.listdir(moduledir): diff --git a/awx/lib/site-packages/dateutil/zoneinfo/zoneinfo--latest.tar.gz b/awx/lib/site-packages/dateutil/zoneinfo/zoneinfo--latest.tar.gz index 12eadffb098afd7cc74615e72f7480c4d043c4c3..039d06f979b1a1f0c6e689122391ffd19816e694 100644 GIT binary patch literal 198578 [base85-encoded binary payload of the rebuilt zoneinfo tarball omitted]
z3G93fG>w2{UkbZCf_YjQDEa|5D0#0)^`)lOeW}MR%;jQW>n8Ph%3o7}uv@SdzkfhD z2dMK8M22g@CJXhiWL~l{m(%}fA(E9~P>%Jn+9XR(0bj?x_}_$?{C^X92Ia^un|A?L zGr?TKu~svRTnU2jWSBZE?*?i@Ha}m`pIfeUIpXY^ji9oK{?gM(?iTM#OG!zM9--C> z{j82qw_kswe5LnW7B6F2JnFVvlM&BZUj8!H*Ae+$S@3vekV#d}jZ+Ps!(n8c&M#5K zDN~h6PqNfcdFa}dY)NOixS8ip*VSFWaq)7?FPBN&%6s9R_;M6X}* zy}d^J*K><1zwSzH)w>zPKvb(=0_3bxSnu+WLqfphS2co3$qkQbhrIN|r$UMsc6u+b zE@Cw9+MM>0kgRgFVq6Y18_tSG`bEvZj-FckbmEm$^}63?%$gMK^0vN-WDBeEvl!Qk zJh^RL{(eE|@c9eTMM-0t0eV(PKGyQ{a?I&3 z)zcA|k}CJO5lXu8>t`)Ko8`1mpHfe)=J%g+a%W}N$WN~vCzBd){WEKIKq9J^0IKu1@Fk8sz3CD?x9b2_ilMU9%5Ey_#&-pAHGRQc=vxIGXh2 zH%=!kYiA*2Xzzf{T5O^T!Q!h19eaNyB+Gk zG;_e>5mqd|ECxbD;dvO0hj5v?XuV>%Tz&Ri;tAG1hB%)%g%lz3B-=EtS8>+YEO6Sa z0)e?lP)luXR^8XytV5m}8UIogtON|GQh&L*(&F@0m%U;gDi9ba*T}#Jf#kB7!J=qJ zj?XR%+N{WLWv>t*&_K3us_`Ly@Q0q`s8SsdM@s)20m!yS{+q=@+5B0hNZAu!j+C>O zvN!e>1+sI5(32P1>^tNd`7wPA9|#I$N#f-mGs#CXQbzOX#^5S&Ra^WfrWkB`%a3mn zUH0b3I&nYo|6^JHAFCxFDWJ#|#o$Onq4Gf=A0%^-*FBG@!@R82r|tO?j%!r`uCw8| zk;h8f><+7fgt53*X^b2xcyC?tEt3DAlJ);7H40=Q<#D)HB_RCzYuTHqX9cp?+FXeY z|41nYG3%a3@x9V!hsN~rM_3ieQrl2as7RV==zRdKsgd761<+a=`NY+6xLddZ{d#~vLq78K z1z@j?WND+I7~}!ct||*;>;8gWS%U>~u&{rh|5hXaHwDQ5tPm+M&%r_7Z~ax>4e@tU zVK%$E8QERhU5g@6+!-f=i!)rfKYqPpmFLco=MIiw&xM2WPXt{<&`r!k*RB4}@?q34 zs!?EP2k9(pEITn|fU5GlZL38p>D<*{qc|r}fo=y2-u4D@!xY{Q z33|&Zu|-j%D!B8>3cKsrO5OFUt!Bv_dunSprnwzo59Xd z$Gan|e$=GNn{+mlv6Xog$(`BG%6u@6aN`FEBy(AiY8 zgLiVYlhclxtVl*-p|VkDfvFji(K{=U=$&HE60HxTUL{5n(+S8WmQCVbEgJbB>g?gM zbrQyrTse{!ia*Ea$gCr1!4T8!ob$OwKRj8jZ{)WzegAWL+w+FX2op0?dFJpnGeOSn zo$D2xSmpK>;e^A_AJ5b2G0z4@gHJBjFQn&oI@^kFSDt&lS=lv3k)y6oPOgFuk+;6L z*89P{pHb2#zQM;ur^v>;&IrsmzmMM?)kN<--Bhyk#81+)AIiWhpU5@P6jAeW{D>)? zoP(KC`&^0Yk6VOf!}l+AwvcIj)sd?JVZAwPSCbZ5FZbU~o3PQzQ`+m9RVRO~p;qf$ zh80GWH0glPFBcy-ca7;YI`g~bC(6jrXZ}*K*JJpmYrvz+Gp4yX{hEitS6{`rrYKcn z%l=s-Z@R8uuYzg@1y#ehGcUuG&52Wo!gQm-#?h3at7Vy@{UYPKYXL^1tanLYpM0vM zZ{To>m9G}D)!~gGE5DJi=vZ+q=eNn$b+-!{Jx4uss;X9QaSSf9m*w3?+s_#~@KNWDgEm`j4UsO67-g(lK;j|P9XA8&u9ObDR zpEXG9uc|Wq{DmZ~q_M;{^mVJjLubddaC>|izf(E8W`N5#rANMo`ZTp!@cS$9EP(vYmU-W<0?LX;7U7CKs`0ZLMg7`F zp;VC2K(ED>Unmsw#^^R`4k4X|>imhSPl64BC@~84`2*?^G8mGDLZMDktaW#vkv;Bt zs1HbJJ4#&y72J%FzWukW2f{3`K&s6dDmWQsL;pT!7=;==Lv=>%p(s(%bWQ-kLDFZA zN!yt_zJ2R4VTO8wdVe<>f7Xe()8P$Yzn)rq$BlInlQgo83SPK#PJS%Tx3~2V_0`WG zIe48kw=ZpKeRt(hA>=+RHq|4sV`?HtDI8=UjEQb8^e^evocCbM2vee}KyYuG85+hH zvq*K7oYR~&8#*=KG*x5tR;X_`>e~IWnHQod{t!w||1|x0bf7am{qNr0`gPLs|9)zS z+PhoXerM9E{*}1XN_Pj^vrYJO+%eZ8WO&~5IE^BFKdZy>-E6(UgyZekZ1vf%^LVc+ za@H>cOx9};_9yFmH0eBHRdZh>4%nwx8VZl*B(YSS>git1^wJ$v+4xCpbqKo;sAVyn ze4l>K{+l&Jwo>h&-El-k*dYNO9hx5LQd$`?1J%XH?xtIX&9CjTlYHm3|X-nN2m zgW%as5v>%H>m) z>BXNPt7l@YJEdL!l|>`-Jv%{Va@Na|grYK!?1Y^pY+bYfgH7Iu9Mq z@EVpZ*$(A5Sw{&%jW7B2jjGEmahH;{-cyM5&-h&mIOuOM!9;q69Tx1HULT0D8TPuD zX3hV>Yl3xtpP9-}fj0JD&XxR(?yG!Cl_cbO^Gh%AE{9!fXINpHN4fXC3Du%jYovFh zLV{$8;_Vm5CCix#N!-->KB1h7{zVViMP=Q{{MiqO64OI^BmXLIfj%=js{_efHZ5q* z_)JG{Q|1l#`?m)xzMr01_@&raUnSbTaI+MyTi=0&l2A+)(UQFnM;K4ZNMF)_|4_MtkRRSJ(4?` zm6Sb_ErkhJgI$rl;*F?HVVXjD#k>SGQ{$^kD^*6cfzw;$r%QmEoT$FzK(HgyRoej7A@`KWx z7HN(fH~a8|#f#nX!((bfa~#qTEet$(2=C=ZN>hF zK1kM$nZg!E$gYk-|0Wk(Hcx`&!wcU*Uf2W6=b`Pa;G!S8#0f4wp{Mf1=v%Son1f_8 z1R#Y5q(DH57f3mO;mZgQJ=Hb>ynGm2v3b~lmn4Nv8O&?%=ds+qWGSTyZ}Iq|p0TT+ z^--GR>%GbCkKyim3pc}1siOTB#x}R;7#uDV`@4J9L70; zS0Gq+AH)GcGEjvTFsK0r=$l+?@Dqy1iun>GDFF05@>Kp~+_`e?U|K#!+G;Ad92ZwN z7XTXqa1$u!27vzna2EiN@c`Rnz{UpvZvkKc0FnWKOU{?kHj2f^dWe`nm5<<3_6>om z3jqMr0I&!EI|1+t0QUg!2LNV|2HFS#;Hkx{T5!#K3N|3FPaz}0)h%HZ<)GPn z#wEx&2N`gX@$A7MSuq;BdJ1lygoc>81NAizVqCbomwvKQ8X>ww)J0EoPKZR#4ppor zUF~88Bk}{`x6WIR1gNTMvu>!*3>k7^va~nY#z6*t#7Y#3TDECg?W7;aK{9hoo2s8x 
zUNRt`B}9_C>%dhnMpt?9hR}nr5xQ|RH>{yLOJ}QJnmGh>rrp!fyEdLy5k}mZ{>sZ zMg1U1q^MaOqE^aJ05SE9Q95A3_P>CI2cPIg+GgmG{UVIgjsiEmsOrR@L@PO5gu$vf zpXgx$NAZ6a{LD6^8+hY-R=l(~qmS5@!#e}3eiDhVCv~q}YcL~B81~nWg0wcHB~6u1 zuC6YLuPw~bK6PG-l3ZJ$E+EH5X;jy1t5SG~-sGP|(VhPYrSFnw8Bc$vlpH5HfzT|C zhEMxOyh70P6F*Bh^-%yGD!PSYb>TJ!;qt8r523}q91;8%34xae$_G6UJoks&;Km|c ztS1fHmglke4VXSD}Jw@C( zm;g|2DLsPb0t{Tsg%L)@V9?l0hCGO7lwLH@Bt|Z6JVE#$ErtaL*Z(9Ux}l9i9cz7} zr;A+qu`f->b}Cmg%0Q;QGz-&xQ!V?`O!3(mR)@zK6MRH?p{y1ws@MD1KEaCB-7Dm6 zf3_ZO@94z|AG})psLPvXoI2>RSZQ@c(j<98T{o~+;qvrR8ixPtT&e&=ky;&FV^Ep1S&bL;wr%H>K#ri3#2!g+gSqPMljOF>s|Z^ft$*LxWjF=@2W{T6GEt%i4N@h zW-mSLJpJ(ZxWdr{yYugA+i^u8QUvjrgb*&wdLC&u=}eC>+>Z5;Z2YNrEZKY0b@h8O z{Lh)L+?qe?m?glhnKEF-xwQMr9h?VgQR<+sPfy^tjY}bO~*#RE3i7PW+mYHg+pD=#T_`Eo=0Ln?X~M-$e^t4;?<^RQOuxL zj-<6FaZ>co_KK%Lx5C4#O4%#eoxRbiYf6W`lmq>Va!ae;H*V;NKdpK_`<}U{I0&8u z_wJ+2=Y&B`e|Wl*>}jy1=f9%|dDjrbm(+Q}>9qtXYpCjF!s&1S!wwqdOwbFX^03ImSg5T+U=5>6V&a$h%eg^Tboey$kf4b>m7y%>j{(Z2vb`&;Btu*xr zDwy~WPs)Z?hcsZs)(%I?ra|HXSS$CUM}DycxVp&hoe!3s=wPiCL}@!81dKdidjQWq zsj36vShLVk>Xk$^s9=X60pNDhI)-fcGl6C))InpDS-!fiq+n`~oxkatZA&vv!9 z1CXhwAlWVn;OMzPvYNC83G#zCC?(hwGDJ-L5$c%z7-D(z1Ry>+`LEL7 z0U1g)rC>eE_I4gER?n(DPj`bu#@v1Y`Am|*j8=^ zXqH4P8c27#KLH2{kQpfgG{hI`^>l86^U=0Q1ELB+4^Z~*AUJ&sf(|-B&7VY|*ZDgj zcdi8Tm)jly@C5^4xxDgRU7;4ELN28Ou>Vt~-cD%>aFp`+L2&H_U@3G1fr0mcB?IVr zcRX0zllu_*x#*z?e4=0FIEOA|RCy%6d#8wYplgRY-Mee2D0?3`<3KR|W3cL;Yy`wS zyP~N(KGS{#{k&)if^AN!t2++3QfynTXu#Dx0B(gyijBep6Z#pq8Uwf-uShc=4tTcq zyGaGO0v0UP%<&nRwdIH+NWtTy%(5Rks$6^nM#ZXNWMv3OR&>CrcpgKPA(&EZshs%0 z8DRm}`ACXQM;}ztk-!G-37!<2Zzb3T#{f{TBlQ6K`Kt)f_LYF+VW;1tV@SE89&jb@ zdho_40MW=3KB}Bs+`Yp*ngcfUhv^vd%O9xObblD2b{a?ooDqsY02Q?AuGwV++yOR7 zAMpXMnjVO*VFRucoUN%maEPZ?DM2H@Vo0%N5CXjkXFz^}-s^~!O|ac54?=i0`FENC zHG2`#05x1gLMgUK-awn|G8Xi6rYMkVc&;A4@(mxr=FtEmFqVE2eid=7e|Us;V)Ppm;sIrJd)>r+yLQD4$89ty;IXbn`b+? zJqoGqXwvqgU}=dWK<#fw8zF*j2i(6e86eXUJTX)5*g!`UF$hus+P?cnUt9vEH|I}q zJDu;vn9%^Yl0$GzCILR7@>Gq@Qz8 zB0qu`R8fA(fAHQY&?RSWH0b9jZ*Vdczk?~01Gpnhqrj!D8A$3V13lsndg|BM!0c@aSIq)DSLM|W>1=xl&u&0~*FZxLHDEcR04!VJ z)o!;X1msj4DfXHP138B6d02>=h2m~?UvNeZweH=*z0<=53R%g4+Xioe6hm~-cS&&4 z<`&(1ngT(C-hU?mE}KBoWZnZCz?^77EniTkfrTc#Nvk(d?N0|b=ARBmw*nw~cLaKZrP;np{|`K#Pj5gg z#AwCvgKjW@{1|WqJ%m6H#dI-%TL!;;5(@$k#9$dXmR7W&um5-)Ru}XDBU=q%EG>~! zPY+rIf^+D=@w;odXPX4)RZJWQxIYhogMw$@gdJ?m#Bqrj**5?hzV!gqIt9qE|>PtQaBYyCFwOk8>DJPKAA_-d8LWUt}(&7LP*I_vzxdX5Gy-@I; zkrV0iF!nbAsASanz9 z`2-wCBh5G>N?v{WB94DsKPD(2`z#O)M@S*9wW9#0@KHUQ0!6xt#pxkkf38n*> z&~^}hq!d01p)fo_L-c5Xse;r)s5^(f7+U8n-!Y_gU8id&TgBskZ4Xy!NBb){bXw(S z0BEHWG73qZ17zBlLC@fWi~#*72m=5`9|eq*BUr&4Hx>XXc0d6gdpxO~O@2%e`362g zwNej3LSl^Fj|$cG`sm5bZ+$Djx;hrU?HSK+B0RJ7pp}o@cVwZqz{FUvAF`&Y^zRu03FR0?jpU-PCbm(Mfk?Tl z%6(aAwUmu%PP1e77m0ufFsi(GWmhMr)_Cxs4+V=-Fb4@1CNnl3vZ&&U- zZtFe)eQ zXAgY_)4>q;Q)dEE?{BYRZ9%ma&1VHxQ9Ko|wyVj6<(h~FGYZ$q2*#Ocp0`9t(D-@i z(DLgb@aqVel|~=F=XNbt7hkWw)%TN}UyoVQ`!R9*Lskh&;msY`)e4h zXJ?)GV@+aeiAQ+mpkAOVxO zF}K6`wx#hiO^LBYIuIkq*1z4kPd02 zK|rKCq#J2Lx(AZv>FzjZ-|g>z?}vLo-4AEayz5>>-h2OxICH1!@ZX|<{x#_Wa?JKz##RBSArgj9fxWxh-_#mFZRd1UQ7QhQBQ$GL4 zAV!t@MygC{2G&;Z$BE%nvSp#<)#c z8p!Uh`+C7M8YF~obLK8DRIoc+5^kHPTi}Bdi9<+$W*?%(NX}yTpGFVkjaY=s^?i%3 zC(j4wg^PjiWC{F+r`npGSkz>3>V_x0+MU>N4v3b+{g(L%4VeLaVc|kifQXl7)S|p8uEnDqYraljUmyX>BdHelle`PDidYuvdze|Ldq=)uXkIFks zJ3o`1$!*IwG<7Q%N~_8={=ju`Qk<2k8+R*`s@ojP&7U{uthTS|I7V0K`=6U@KL1Q@ zKjz8nP_J)NTMolY6@~ef8B^IUwO#t#UZ1BpihFui1jtq`KdBi1d{`GZ#agJD)_W#&?w7oQy<#!Bb%2B2bb0lXz^Id*Ze0?2l@nk6 zL`6B?1(IfOR`T47nDgSn9{x(I+bbUT2mOYQ4(sogmsf{#oHRE--Ic{nj|KP^h-Z(} z$yaO3!EZCrj&rU5tuCz4YR=4PUKHX_R>@KPdA7ffZ^|91XEPwIy`LKzCvWLC1qKBvsn#oz? 
z^F<5YumF=4fzcLqe~Y3V+olFdD(?DJ+d}U)Vb=4GkI&kBnP%@^xad*NbAq?IPgUn; z`SU4>=i)n;LICa{Ug1Mwds(9ZOC9Q?Z(r@qQ6+GE2&G!5{_ve`*o+`{B?kv`pdlH& zKy9GJbrAY@cty?5hUp06uVnDp4>Zg@bQG*3|6000@Z1ly7X~k=8z?0mguX4f($?;n znvWp%C4cJb$SmyoiY?uZK^BTc$b*f-Vo8drrH83A$VFIF!!(u0_i5v8Xx>H$wlpF(S!fU` z&uHW3`am}}nKgoS4e6WGmpyL3qAZco1o1ZzYRz|ciL4zb@ql9@Ylhwkq89-T*a&fE zI_CXpU%nvljG~@<^Uz9$zElgumhM6z3++PW!A5*$Ns5Fq$n}gc&J3NMD*%Iwwr872 zKI98>au>2IHX=Hh#skx@z_b!=4{q*1{TEP=1nN;hJvK-n0||T}!2u*}fP~3^1Z2ex zTh2=lK11_Ibug|@|0QYw` zw-94Sz}E*LnFlvre3t0=Bx>`>pf2R8vaZ?o5Ldd{sv72P*koRvIp?i9eb%7<#meBT znEQ{zP0WexI@nd&66C2*J!G0flCxM4QPKICP;JW_?}T++SE6>7ZB`$PE?1w=U1t&x zr~r)rH1Q56QYGqyB#U%i?2lD%=Q`Fd)1^e|qxX=!DYH>}r;ki+N?T3fuu5K!W}3 zNo4_iBMlpTBdsuVEs3@dGAr`AfX1s8_PCXE3q&d_MN!$^4Q#ECq$I#TpPwG4WC12g zPwRi=G2w34Q>Fjsef+`f>2uG~{kxHzw6sen%ySbZ52ItqrUrbkEd-RsvuiaZYCnc2$3J|5_w?TKKW27640+Xg*jdWK2I3l)_N&~lbIR{N z+62)h)uAj6?us5T?^gMakhmsma2~O-n}}ZOHUHd<@$-%}YUa8B=I4Gdy~=Hx<>#mo z8sI;Jdi*X(xpVQD;8^GI{^2-u>!(P4|A8mlM9S&TVcGfwzfK3_R(YqpIs2mLN^eJ9 zzvlmUt7MVkjM0u!dWKoa$=@2g=HroTQ8|y{lZGLmHU5zuzsO{ki?GC75jr+(_N-WBge;eJhS2eU&rDPpWMo{ea&!b>ZQ-dD8ayRJ*!pf-=kq4sOH zl!~IN$%<2&ve{H#?doP8g$jPxxGELL@812&>TRexlWoBXggG9Z>D6*yH+S7tE+@!x zpFxLTqGfXA%>;bXsjAz_)3{v_XwI%rHHNI3H+L64`t*D_9V-61bB(o_iYx$!*kM)v#an zuAvraFPrte5fZc3A`mSg>GtX7)V<@%QALL5k=NW7CPBp|^;wG`r4@&!tsVfG&XJ$7& zZf3SVC(muBpdPkQSlam)53u^*LK7Yt&{$w863GN~<;;KOJ-h==oEW&x3PfwptQg9! zk%U;LwN+o#*bb4AT)*u5YcH+gc_4inGaEYgp{g@}O_M_0^#^$Ri5|tGd#<>hwf6C; zXx5W6}Uen%g7Hr^Mu@re}mYqhmLeX0+qQ3_Lpy|40YJz!5GCC z__;M!YGxx{zmmH{IFPXoP{|sw`5ed`7I-#ltR}%kPjYt{2Xb6$k|vrsGM)h{y&lpl zZpB@0KU79;#eu4CAB5#B!8ni8djEUyglS(gVkAc}u>mS)1GX`E=F9@mw^}P&ixJb% z&&KGJz22{#SvT8KvVXieWzfOfZGScbMzpe-w{+tumNh+N0g9PegSlwss$ERhc;0(W ze;j+81F?f9KWMvf(gwej`S2kVYdjbI5O+6-_f0;N0z!B;^OsjX`Olzx;rMzW5cm&Z z1c6Kt@cPGy00CwYDBKIdPt7M$2*hkpktAwYlh2gy*}%LHLm(DYNBVFjgYj{SfT2cK z6^cLt0!ARU`kGla5QYD9eMQsDVAv?N4YU+Gv&Kc|=GB5m;2^x!u|Vji&jp?Yhn zJ-aH+ML&{7sn-KF{zh7Ar$hNc!c~!&cJYj?rCR@hkNH8r-PNV833fX)H6(Xr!lkZc zbh~$-z4XZk@0aIGqR0**5fL(AYpFr`)TGom{{hG#fC~aLAV4Pt0?!IV@G;pLh(Juw zl%+Y>HUAm1qC6@{BL^V|koF6t$)kY)H3;~D0Mc6!KnDRj5QqYSxBmbP5Mb0(P$|u1 zmg?3>cngjPGdY2Xm03ZrG?&>nFDSn^1T!Lm1O!Y=bLnK-TB! 
zTSs{9JVVQP34&A(<==Ewyh?My(;{|y!!Yd%!7vbHVdMwp%iJ|)-@qHe(?!;^7>nB} za)jjr4a%;lrq>`0Oc7GdZg3%bSPUl{63p7kfR08GM*%K4M&DF3t zu%(>_=ssfXJk~#)nrPpWQ9;sAMF8UYeyIJuw;P84L1EWAzesz`pR&b$qeCW?C%m<+ zu{sm=Y-QvV)!kW2zPTWXNQv1B(L?TTL>IxxgU$eB46<{n0yo4iI+F|JSJz95no98T(6kA-OEJG_^I&vEI2y@4wJaX@t?KfF|xEd1EChyMn~?0tTwBwR>$%R&qgPmlh)(9H4`sg$fqG$ z|GRPY_N#{5Ssv*ZhgR+;6`Sn_5J$H*eNNAGV{wg!WC<%B)(4YD9nl(Xbq;CmiQjoG zx@dg|$G;8EG0=7WPn^`Apg+pjPmEiDV~)8%sEfieO@P*+QhC+9^2pCQ#lebs!0E?Q zZjv;PFRzYc!OR?>^}N;@XX`lDqaZclWO#3>$x#GO3}+Ho9VdNCGj9>{uQE?+#V8At zFll%qqi2}j#a7JW*WmGAtGP;}=bzqZVX1dDx@qENI&qQ))uU-~7O4l4ZdoT7{DE?Y_??-5nL&)_|+A3$3+DfYDxpQV`o zvi)n}=4-GOlfBdHy)x@)jFa4hZ}#N-Vq<(g;bmX7e^2YyX||s&joI8TODXyGmbx(M zM8b<88u2}?t@#62EXsRYK5E)8lc|B*XN=$(k55?^J(cGbf+>5umox>B?y6C-;gQF4 zZ+!M7A_4IOtV@W0yWfvppiSv%BI_E(o%yh3v+YnM=fkaUi4N1{>hKn&pOPTW)Y!o1 z%c&JVc0rnqG$^%XyCL7MKd0q4u9WxpcPvo60yKF>9M=Su4*_*W{h2QMV@Zx2_3!#^5430^!G~Fk_NI7|o@%P#p{YiZ z)p9jQyY4faId$cfY8NqocAMXi@Ae!xG}FzCHH@oG92C!*+}7rG7b@!ad?>&!{%WLO zP+P|yZD3H($W70#Y_B|pl;KhLY;A!ZZI;vteF^xtZ}S?c^2hQlph}Qu8uL$pY+B!B#i&Jr)FZIN=mql9r?w4*?y$3h@ z>gd>!KfxS2W>xAZR0};zO!WyX^L;(!Q>P+An?LIKf43RLJat6B9KL=k8vR{-`V8{I50A~Yf7wC{d#_iT zPzSJw{(NrU4)U`v2lR#bZ%g?rrznLud@J04^=WyV_nRLU2CKV1Y}Ch;&ia%Nfj&YdZ2J9`ya zUdR^{4%F!=xTtaC?J*aRxwDG3 z20kDc373|lUu9@eX=y$4Q7L=D%8KM)qBU^HQzXnBg3_W|7Ng73HB>S&WsAf;)CH%PjY*LRrj!m4dt8cjy7vF!W&0tVm0aAx(pd z8oFsp`nK$aje-jod!CjY1=uQqEfrY(W53u2R*|sIC+NX~bFS8aQj7-G;dAH#Z41e# zFb%3{(C>S_MtKjNX1T=+urjfFIXyTp>_p~SJXw|(p~}z$^cs!wtxTn|7&xke`^+Ea z*{Lbb#7U^LDvSf8>0n6#tx9$#cAOo*&5^BFubSQm79cJ@v26fij&#=vs^5yQhk}`ViWMkth^`K~FO&B^=tGrJ( zzM(`0yr=yTwgx~9AAdSDJ_v+FJd9(|8|Lffb>F=LA8`QBF+XfO@-(<3XP8Oma!@n2@$ zu~vKGmQao6#BzJ0Nb7ae)rg()9LaIq_}CIBAmGsAwr@_E0$7v=+?z6c?j+9n zNa@&DeJV;EAM@O#EC%v*$pI`~ZwJRR1vif7@=rSD{kuc6%H>)x2EpcHvr4Vcv-soc zIrOu?tw-u{7TuT_WUK1-wO8+tb%lC@EZa{;El<8+#QTUTzS4yRcCOLR=FCz9}oW+{5;d3qO8TBTYPcVL6=0T^H!q_TX3BQku!iX`wYSSBy^i*zxY`2Dr52TV|qnX_&n7$9{Kv9E_k$vWfIz=CA-%Ej?W6$v;q>n2?h#|!e zvylNCdU|du?&3*UT=%FEVeaBe_mV+KboYZ$+#$K*Z$(1`;L8dyzcKaI36g)EF0f;l zbr3+TCa6$cs&!i@Ros%}0%k@N%T5MZ!R)iNgMfrr<&R{qG*?x>%D78VS(QJYCzYO* zkQoP?gBWcGHS?QUSd+%BV)35VZOc z0|J5+)RHzxFb)x8IOSU*ilo$#B+7GMWMa#BvZlaifruEi$oPS%uL7S_BD|qRblX7K zczN+Af&OB_gy~`-hyJ1|TxHvZAZE})!cPo=fi9LpQHEOZ*F19I+l$mMgz*D%AQn3) zL+zaOP1NJ>H&J7DJa0pvf!Beg@dJ@=bg_cCwxbBqgIoSQ@dNa3u34H4u35HdoPm__ zE@g1jgh$$4p*WK ztx@B)g+<~5zNzHFltK)ul05{}Wi?9ax}Hri!=H2Ep|| zes1F7iV5R(1Q#=axpY zfcO!BabRE&vj|jCK0)eYt9PHnOvY{=P9wk<@;QYC+fL%D+dC}k;^ON`w*rsTsJ5y8 zOkX9qNRCIvuju;sNkpT);643&#uV)Qn);LTD>KovzzLH9D@>ed9Zine_w~>V;u-(VEl( zuMM)8%`bZ32RCLD(GzS<(>4)H{Y}piX2eZJ5$bVpaU_p}dFASHcHfly5B@2=F+&D} ze`t?znjMXCV%d#Zn^HXvvSqpd`<>F8mk|X^VF)ozx+Ew1$2~7OO&dd-)em|u80Gx+ zia*VSp)~34^;|$HucY#IRw8#gDT7z$!~8~Dspa%l3403$mmQ4NAd;Ip516@e&|!2;-!oo(38bguA)ND|e0;gr<=Q(W>8&#K#OA&G^LX{hWbfsGr3YQjx3rCgt_wnU&+mblud-qHhFf>m58uxAyu+YKG#wKA8Po&K+@O!IR|2qyTf??&=ig0ggVuDGy#cKAje zwbd^r`~L9#iFKv1BIz$W#k<(ZO?bf5yr8g4DsC1&1ZSyp%?BPf|NJ~BcF`dg80Lnv zHJix(tvUhZ^&ta~5Y#!oq#Za-Y{&iZ=Y|g zSS*iv7kj9(1Iv$Jh1)euHa{%_aMNtmmHHkv2HE5lh>8 zZL{!u4L0@W&o_sRyQ}Y`@~vxpML0AW_cK3OiigCE1ysBRmbBK4DD?~{>(~WM#m17B z0+W8iI~=;qBh=yrO~07uSq_I(amroNN)8R~xc`FS42N=7zXikn$(^x6Z`@qJZ(%!a zd02#`M#jaxANNw}3Pv7judfh8-rb9UP2amRbyXU*Pf(>Hsi6FViV-N4kx}v4-ofz9 zG#Vj|kx{(Q0|G*B1%-l`zk(B&v~CI_QcDXq;ipv|Q^|V*&>>Lro&{`jC7TmKs%dV; zh$OPnsu&*)N*CCA+KOWaqPMiXp7bL`AaHXh%gwDYNCAm8xt1G2oSev{P;AS|J}b%K zgfGvRde-;gh$h?y-Bp@~?7d?Bm)iaanSyT0xVvq;%{WRX;Q@6a+ynO) z`DsF)xA#LvUNQZ_wWnWJLb@@Vv4SK|f|8g#|L~@Zk9SbGl2ny$a~7U$b9!NJa~f_A z87(l!wc^IipS8!ga>X-wN)Znlp{orV?J>r+e#!)O>~P;Zl_8^#FXLKkBwu>|p#ps` 
zLEm<5SCRmoK_f36TF*ZsZK0=BV>4n;Csd1?){iF%n0P=K17paHJnz2x3 zh1209OQ(Ni{>fQD%KH6-!9kVJ>+{y4$b$hDld~K*k&=Lf#&qEOAGPZ`WM8NlugbSn z5#r04Oepe_?vwHLVZeta#4Yk`ip#ghw1Ng=Gqgq`g+-<7#O$~@Y_M7S=EU9tXC|!X zMd@A(UV0Rcf<&*C&DWW!D~6(EOEPy(AFa+G2RN}x-lp41h)%wTO^r!Cmi_TO*tcV< zpC<9BkjPmo0)Ju97+Y?CE7IzoayD|1mEZ9(-jtjZ^&ghZ|{0&G%=h+T%m1} zMd_~HNcDWvg7_L|w_&r0Tx>>>q}Bdj!lyI^r(vqfZ}U5`1;e96SGB!EjSo?KjUT=Z0>)8%oiAkqPIG0Ot;^I! zRPB&4iOZ2wPJW~A6jub7R8EFk;%JBUkIsZzot2X|7)E7_%aERMOH<;_%YSnrXXp>* zBQd}9iEwdB3Tb-16p?JVj+Ly9>M~On8#48I`Qy2-YPp5R;YW-L&$4isn{s;nTPpjt zY6gaJtpR?$YKE{_okXmg@G>~>*O?V}bNvGKxB55sn_nZvLc+naS|KJmY&igp08&&8-DKM}LMtiSde-@NW^Bf|+sp zOQQFZY;l2lw21YRNUmR=U4cyOSGlALp_b5j@t4WN_Pkm>Z35B)qxQUR1#KS5@4S!s zS$FD5?4a}6l^Vt(Mj#lW5Ymh#!#aAbYWzxCa@d|Jkz^5<+c%KW9@APG94@b^-+sjzJA?49D7L_Y%y9;PTh4#0-k+vI-JSAbq=};*Y zy)*oBu~8ikMN=hU#t}CL0m`I{4P=?&;F59>oMOhIF4CZG>wM>Zh5#mx`G`+Ubt{!=EdmV2xNBcbh8deP$Scv9UZjVnYm?E1r?FOS8Pr`G7tg z{91KienI7SNCgGz=>T4a?Nw8M zsbw?GH+Fw)$TKao((4^C%j zl`{z3&#K$`h^o3RdcO1dxNJP-K9-_U*Jm$d!CE4iBW&O>b^3M9b;j=4>{105e^UK1 zRzR<+wVu4Bndm5Ph3e7vqza8bzIj$usO^!ef9lSROEDf z0%!Qz%e)rV9Qypl%KDSTB#sAd&)=|&fQtK^2U+JFAdk4lH)rk7r%>Xihwn2kk>AC{ z6_bS9#SOOz<`uF^l<``>PQ(Y9=gThj(7Zy;F)0i4!k&KXnmsR_C2XH}xOsiz%~4P7 zJPP%p-sr2jn$oC_=IA}^HcQ}Yj93t<$(mu>fiBD!RS`~|5^&Q2ZG^S~a#2rfA7Wis zD`k&GOK_O=%AOmh7vK#08&iln28+KEF{U8rRi#OuQY7e^Q=FQXi|i5mt3bc`tCzHd zk42y`i!?(}E#h+pTfaXZ^M~>*5{?SPBK49EV!4UulUujh)BS}ldl(g;fA$8TwD8#n z3(S4rmk+=E>GHhcaOe-g&+Q-!jdH+{o8L17I>)T6G-6Dr-1J5*GxhvC-@a*Xs<@Df z59^3_rYN6QI!_LP*`ZPr=6RR-dPVod049n=m-dhK&`l?=ZuS+7(8^4s(@)F;SPad> zg>|Zph#2~B4Rbk@{vuuSZ{}mVZRW?=_sZG|yQrhkxv1;D+RW#RZz^^CnU-lw=%TI& z#@Phnkv=+foCh4yO{H(bn@VlTUDPM`dS%7R@JNU%2uNm?={Z?tm^jOm-WftgjDs2J zNcw)pH!*vHMxt$~V=_ZO6Tu!B@#4Fv`^s+ScR;^I>Icu~*7wSe5r;<#=Mj+9>(g`E zm8clbuz~I)w!A{qA2;(o-uKF0PlH@WJd&LfJQ98*I!@NNuQ-S4z?co6WEQzv>M}es zVmdssR$P+n%6Eo@{8$KIX*un3Ks5A!!d_a=h;$SL zS6mX=F(d@3Z;|=dh=H$qWr>{Aaqz<<^Up^D*}(~_`f`!}!X+W91}CXO&l%DG8hI0x zY^5N2gA$M=*(+P0_$^YH0iQ%W<;Ijt23#i5<8ISI8AHNo6V$JC|7kSvNRkQ&NUq!7 zfnVdYno7OEh4cnjyq#||e@?MiHX2-mbT)7?8SzPy-hnlLfK!bF?Y@Ew`>!pe=N#4s zt?j`zL;wvwgKNYI8VG@HVNT%JUfK8nBszR>WP^xg;ClY^cngLPnD6Bc3HRA(89`ok zU?#}xKnemEDq>#ty?&9#Eao{gyWi{l?Na+HLg?n{Ltjs|GgA3{@mL@Ujl*s{tNv5Y z;M0bg|H3{j7I5YPmgkKD^(r`IsHWX|s(Ctrh9Y$K`Z>{IKpgFi{eu2?K~>z9zqXqe z7!=yK=gYFU$6+)0wwwHGvc9$DbapdG%k90z6`g+93DW~q;BOH4wv(*bui6N%tQK0O zhSu;Nc#e$nEKjlhL%g@8yG3M7zO*YskFtcJxr6(aKh}TRw=i~?F$LCTwC(4&n*mk# zvE3M3<^=f`Cm16>6?ZxNu;(xd(PNmzuqL4RPN~~FJuk>L!{&<$3_YLVdLZi;?-{fqy~QrbHXG%Q@(Mj4=UO7`7w&m) zL%NGykZBgh8}%vle2lAwtY4sK(3*4}yCB_6i8tzF==msDI$1wY&wFdq0qlZQGi=@{ z)6nw~E+4Xf&YnRl(njopWV2D8D8110VJ>~Le%7A%R-`3QUoWTAUgCrqNON-XgzB9C z45iMrJAxaDu|+9`x(so#lJ&pp`DRHPi(L?J#?BKZ6Y4U^g-O;=+XHT^gkTrMnjLXR ziH5oia2=5LQ}%qbAa%wrh&KDq9mN;w($6(a+E3bpZb52-T@Y#Jz#YXN>e9zmPTEh{ z^Ua)86}uqZjGa5`Rj5lZR|IK4ZV$RSsRVXGnAs6m6lJJO50@QjKX%VIGt$@C1tDhN zxsq^o%k=XSibF!GZ`$UfuB{V^hUP=8zrXoV3vO-h?i)YJVkW6q=m7f^xtXd-q1dkq8Ppcqpax264?$>+fHYnmgn zR=`C40-_x;p7oa|oog%hCKj@HH}fFb$a2qU%7ld_p^QAXJWa$G3#*b})~OxD7KDVj zVVHHI%B<`}2ALqfjf?r6J0l#Qta}s09R1#vP!%y=1&Fes4)Qfo%FVzdM0rg- zKyVunJH7`ZegQ-|BJDr66RWO&;om)oKX+`>^}h}Kd74-e;a8AVpvWw13{z2s@frhKf#?%67H}w_wmDdnS2S2~fyD5xh zCX3TXxbI_n^)4{V=nGQ~`2#Q#X9zlj;Ez_WJWuR^iTHib72vaRUM2e@g!uNFeX+9+R=pbC2NOBXjg~t3 z`Qsy__w{n#i5mU%sUolZO{LDZb;7(L#kCvec^>zf9u6Ue>xU;F`1ra z;YL1>9Dgpu-jJ!$wvG$mx?X18Kz?be73fIx0+O&@=)AK;EnnWn*{sRa^y<=EI6Cp+jE3)iCV59Va^gXO z$1&iH#p*JYp@D;B{)}cE)_*mXcM3h?lUttSi1xDDU|((!T;Fc=TrgYW{-EvAVpwyG zUvbuosh=i(a#Z5?y;8uR$NdHHVZnUa#wJIz1DlP{-*!aPb!DufI>;QlEB6k 
z(71Dc$Cq-&q|U8OI!VJ{`NSzrWgG$2tY;XW1irSlA`ygStrZK3k*gn$iuz7CZ+@G5l7e_^)IUh#cK|a0Z5eGSJ2)-X{wF46>pT*UXtNkGQBKC8kyo*}j!73MLR0mC=;q4$v%z(Wf3-h9# z6isR+=|#xrv(BDjDU;Mn>Z_1N%(q~N?&#$;wZBPI`i3bRrgl&z>(l1q@Z0s2O3uQy z1x&UbLd&4HP8gp+aG8;~rV8cV4j|E!|G_>PUh?GZeTv9M78T?!Vk>v+?e2eb|i z44eQ-1%U1kLbl4iwl6xBDNnww@fUpqe@%xP#7-Yihab&#mt4J|DjA~*dBwZRY-}$zSfQA)*e7Sf;-^a z3>g5hBB<#ZUSP>bsC)}=Ux%+pU($?a-#n~i_5dUE7o7zDKQbQ=Kj``_3S=Gravt|z zx~c@io*HfJ&g95}e9A>a8{zczgb9CG)c(R$QXBEq;?=Sr)8;Ak75rA%(SgJl?X0WU z&~*l?=&W0}lT$Wf67UCQ8N7Yk-CFGLY@bY4NZKB|l&(rO(OBx6RV;>Kq z;=Nr_t?V>Lv$-lRY$N@`c^pZzVp=(AuE5)X!V2y5d^B4R8GEYjN z8tzDRYG=zx1N>>5=xb**DL^}I6`cL+`G>7f!oHylR{Pnn>pOx$m}(ZZONPW?AS?22 zz?W!2xLrSEciFCCe7fS?h`P-j^PuuVs*{EnB}|sBTfc*&@Ptx)JD8!^DaAoyugykz zwbNgF|K;n3>y^y!U&ALBd^n=Ceo6Bx|Ki=cTkFR)$iai%b!4`3&<)1M-tA*(T8{Ml zZI{HR<+JWwsymCMqh`Jt#>sT)=leK8GrSRRk{;UTt4FppW=($cOD6<7NIjahWkmmVy%=3=*)4L!`AE0lEQZ1ZoWT* zBg4HV>pzv3t~qnW{UVObtX`id`nH*+UpUa2zvCBvOK;aC_1uzhR7bEuvop#Vzr+3QK1;U z2)y7ze>h9?F=$35@G&r>KX)sC2L^-+i=CzgP#A@1(NSlk`mp%>JSa?^;y!W9TVatV zS4&LumBOOxyH7|V`B2!rj3!Ut+aYKLq0taD>S?I>?o?`VGRG1#4E{<}_zHablP56* zja5J>zSKhELI=c{=^hf5$fiI>_yye<_p?5kA!y>>WHlixS*mgol9D=@8jb+PdaPuh zy6x2rL63`ki6KH#nEd~KBm)|ffx#qmnnnT55FrJFrqdODT2O(d!V7#tDll`jr^*u| z2T5QN18bfCGmM45LPoe^B16xW7z#vy$)loz6iG=)!WX0}15|8gvNyRjLkLI=$QW1j zWNNvVre8n-HYoV`Pk;;xs6hdO6evJ{NW^{*&d@Xnq!5CX(ti{b24oUY+Ldcb8yxTo znH-AkFi8!6cZ(y528u;N@fj#a#0AA)L9yglP)rYsX~E@@!~iKu;2842F<>$vv-}%F zo~0=YJ^~h)(FesFZ^6nWptuVZ(}sd#9#EVLiV>by8rc61E30k4kKh8%1!%|PvmUxW zTfG6enT1(=WUj>WAkOW;$ZaSrC)?NqUM&jiOt2zMiX68e^>8HcGJ~f%y$m)^`c&!E zGlZm|qfT9hnI&Dt%=2bbg*1J0HCr|-FC(`oI7M1<$QVDK)P(qew&DD&Wx0j8-8_1e zoj8-krb3|uxtQnZH&fU0n)S)VRj0j!t42OUv*^|Oz{v85I8bfA4=A^;h(D@FN?{@G zeq!*Igjg7t(QFgj->;p@-%EH*mY)w49uyy&U#E~S`$__hzLL6`<3;u~ERo|yo=dpo zGbpc0M|1eU7XNW(b0>#4q=u$*Yv=#5rsDHM%qAZV($BLjNBO;myR?Rz&O>_>ItfLr z75oZE`&Hil(oOaZ5MhcUO-m)D@&q5quCxFZr{L}Y|D#7>coum5!AP`X?`cYm*PrMB zHreN93wduXHHY5uX@`K(DS+#gYl~a6h)=NNVxg*mbxKNMb}e?P+&w_1?Bqj_4y?#b zLAl4)U`#vATMqWwfWt9X<8e}zk;y7a!)BT~@p3umJQSru$=#-7K!VFGxh#9fsskQa zTa22Azh2{SG97&4fA@|9-!a<$;Y^wJx`E%;Xnk^fp0lBtiF(Lw5vV!NJD%k=n%Al0 zH+a%4=B{rqyPpwDUszd_@+o(oYKmC*Gw&91sfK9sNauZE1H-e z*1MQm-)-19{#xl8UHjtFAYN9L=5O@Y5ij1*x_4);;r(U!=-_ElVRh-;%WMw5|wB*T4#4y4ueS3hWBOe z_-I_W-Y9KqYM}lG;fT-?|LZI5GL6Q#EY{1 ztX6eFOp_uv!3a($h54|YMT{J+W~c%>nx>53H5rr_Aw&i>mWAQB!xWSrX{UEEhy^_3 zPf4ET2WhdIr3-nrlo`v9+)PAfPFZ4!H0B?AMc^OY6hcw*AkVLZqp1>wi1>~a8=Lvp z9ZMoz)D2?<*UbihAhIl}1UbvE+)P{q6j@Rz0a8~kda&zT1WH0PwV3A!U+JPMp#&C^ zZ}Cufz9OhG|3}37j~JMTzGXWc&YhcwP9N%Oj{ONtK4NuA^Xs@l5Cge>-KByyGFle0 zw|qQ8Wfe)O`WB1(kdtp$y*wNaqm zAoH((#&FrOM;Iqul8wodo*|VBSm1Vs_~rUVpe!1XqhX zI7`}Dz50Am@HUEzloT}Ud5_os0UW1kA-Z%Ch`*2E8dn2(46>vq8eq#PODaGiOX`wF zXx8@6FqhEm)pBWnxjUGM_j(?vfY<-#pS%dYfCjAFr4gOA+1$9c)kBIKgCizi>}0RYB*5P6qc zjG^SOdmguc;g73Ra2lGrv{gu*7@RHaktQDeJF^aQQtPfj&@iFR5hb=cAK)~83aUn1l84t|IrQ4+MqSd zGA9kSzji*t9%%m~cjVgxL_Pu9k2KIXjNHYs4E(~3!u1XfhU?A1sTJzuskcP>^a8B& z@D!2;S@aA5#Kss^A7QU^9@hnh&Gc@eKd=#;_5t4v=Q99y1Y2i~0M^eChe zC!XQs*1qF^u06L+c9m6A-)rIy@eocY%>$09fXwWkm;^>u|o@ z37!L1BG~za6g6Yw&wn`q)r$ZpOD#tDR;YW!tG<|4iJSH9&+V{V-Zbz|&BafEA>%U& zah3>d`5x#rx-}?DMQcjhe~psc*ha=6GU~=IT&<`RV5Qoc1ng?UtPCa%>ffxOZNbYX zDx)5sqpms)0Mj=#9!Q}w0|Nv73~Nk}5DWPF#I{8vaPtJf#sOe;p)+V3nD1Z63W7+% zJC&s2zwaR=zP-*8@LqUWA7k$;mA2|@NWj|7LuW(=fb%rJ4NyFuIhh>QhLGU(w`Ko11r^g9UD|ea(NK!kgI%9_HULDT4B!MHZk#L2SNR{1mL!P4W2P3egmf= zVu0gp!0BNUC*r%1l52<+{KUBf4zJ{a{YDiHqr z@c5U6fFPRF`6|yHZqAR=rjF$-R^W`;cgqZrQL#Ql*DGdu!)h;OMC}TkD54=yG3wLP zFZ}?aSopE2bVwsmIbMM?qIc9A4jp#Ng1bz>Q?#DoUXT8|Y`afzru5YXcv~un=f-+! 
zJ0TL$-hQbC;nDp!EKG($dayuxJz>6C2QFu3c8Prn9lv>kcrU@@ui?LBc0*%@Tc7n= z-x-v*T)F4CoJJY0R^f;EWUf10&)vRS$ur|@T$+=zBmbm*`EW&?qm-D7;#vE`ga&_W z!v;y(hK{*TuHdbKesk;8buSYCBFJy!M>YNjq@dO{i;(u5y>`)YeAl3Ux3jr(%G##b z9InR~RDV8XTT!CW`jRCc(NOEx#fF%Vi;@KLK7OJ4LlYPri;geKB!AaTwNIm0+1HK` zY!P?F{b7~paZBgS0W>cTQ86w#u?umkZD$&!fgLH(SyriJsI2PQ+i|ho8w~ zE(kxX=6B=GJc)ln_hVb(C5V-NFk7E4m*_*cI}s7LYwMQHp7!vrwJ9$iR* z8t{K+|~^ zjZLH*dnDsODfOiBA{vw=bw5dyIrWKq8%L*R8033Jl1FI%qAGBzP7K#jlNT(sfB*Q? zh@0^w>ZG=yHOR`~w=9L{K|YU<%R6FdlR9O>Qks&!pRwAvPk-|bgsy6K%$oE2tRI?D zADg$%{y(SHYZfefF&cdFxV&upGKtY^Q4<`&i-(=@^3CBiHvxmH)%%4caXb*`ouJ{q z>Kw;wB>h1u3)T--U31)lqDC`rU9G6-j-)vH>r@uey$FY7wB?@f%{%g}94Tna!Ex0$ z5`Gvt+~(F?i1~dQJ+!J1+nn2UeoSz$xqtGWzSGaR7nK71R+18 zq6@|xM(+zK79LVbtZQ&(gdtr~Y_l2qX2YovkTyVMAas2Y+6IDr7xH@$u{a`M0J1L# z{WSu<8zQYaihLJlA{MteqD!{b0s{US5?$}hxBwa|1f>m-FamliBH9LqeAlb@ATx2q zfi8siNch#LI9;TPSRUetCx8nQ{uvrwFI8NC6cs|0I3mSYWMxG3W@P+o^er*+tVw&! z(p0Uj$Y*HyLUSTsa`WC7#s~jL2tDP*;V$HdO6JYozbEYx8L@t2j-T_UOLcO3n>{p! zm)%vEtn@aHtVmh-tvKEqhdvtw4dEE9EWTnduR`|alN|9>)W3gRxmODG4o#8q7&avt zrvrQQq#Utkw^+F%jU$1X-<=*ECwj|Y$8JG<`eP{hs5tFE3G_iaJN@T(A-?U$*IB@I zL?_aJyqF$IQ^il$!1I@keKFx3YV| zE%;$1(0WPe+H`JxIHR%yY&yL@+Gtny@nP@O_G(hM>T=3CL8PA7^(uL8z>|2s0IYN< zwG>W@${)wq`WsJd@2o9RM}?T5SA^DCu)%Caa~txIyn5J=3k7O$*fo?|pRVM)W;Ez( zC3jZ3D*eoJ&bmqLq!HFxe#5Q2{+ms4nLL@z?}#kh?=PqJ!KIupNj@kN^7}NP& zpPDXWD9FbxJImFRznRR?N06|rz#v6Q03|ild+3B19czb&!Gf|R9;e0);3a>mmi8TX zuV#olr~O5f^0EY~HAa^^B*fZy<({ms{T0mWC4~7MJFX5Bz5m|#wM@JA(ucz_)e_h~ zkUNQYe zrHX|pT&Is&Q%8cJ?0;{4DDh$|^{18X?8Q8ISh5})l-%Sr@i5S-me$#)?L6IZw_0>3 z7_R=Pc|am)t@$D?KdM^>y zDBY1UzN0rhT4K||I3fA}WkQKSm=IXI^r-7uigGU_?|YGNGLlUdnFAVe%ir$j#n^xy ziXB0r71V9N!aXv9t(+uJX88cIIM7!r1XKw`Ld%|{Zff}e=_{kxEP=>&$mnM;@Tu@= zyJ!_rlc*5bG`NP4(W}w%vk7Rs7~})g<3M>-2#OMjKLe5FQP8U~@Uw|%yO`;N-}G5Z zrgpQ?2fmpNL~fQDrXnWj#`>(mwbvia5oESC{Ra61(9Z!F68=*-0fqLT!UZTi{}gV( zRXPYcbwHXU$UzyIyIFRaiWJ~z1UOL9&#>{S$moLHm9eljlI7>c$E{)&k*=A;@!}Q+2ZhAkP2| zBzfQ+OSdx6^ELhlW}G>fc;F%ky%32u6*taYQatbt0(v1bZ7M;Wxh4nauQ3ezfgkqW z!SA`*rNP*Vk@oD$;oMq5=Rbsy5$-mh0V`!(!0Nuc?|mwj@lZ(LDezo9(m#eL z&!Mn4$z2!RiOj81Y(6GT%|571D0n}noZ}m@$IYSl?#Hm?;LjhdaptY!f%k~$^jNg1 zoMUR7TA>k?HorJhdX-7!IUJ=AL-wQ(8>YkOJmw?j$|~?G^pqG3CroG!+0_^ftCHpG zGxMbDO*7@|*WN_fb#%O)@-b7CKCHltnA`l6du!o)Sgdb!`QydTSU<95F}AidQi& z2~1S^tzlIp;3ovcs|Aej8S(4G4h0?ez4T!R4SvP0@~2GZ^B)_CFDsffX$;xrfN6RI z)5I2>@;L{lcL}V6k3Fy^Dd7Moeg(hYTYwYbjouU9;d@!Z9|uU$mleeFpC~+NfQ3#9 zpL;X}`o#gFpR)CtQL6Q(IHo~S67_4S(RL!!dT#vTbIYoLwNUsRjUUQwkX?r}u%`UM zb{%y1Q$9$*+Q^%Avr-$zUJz8gJq=?qd66 z!1zF%?gX(YCa->Ve;0Aa=|m&^`}<)c*hittCP)s9^bC)vi;K&R+EHaL<4BhWUN17A1JtVDStxwK*VDSIjg;D{ z7`=oC+mR90TJhjS+Q$wYPc@5>G|6oez4~@)@Z~A%91iZIr!D&SS|n#x-l+&wGSNr7RC)uciDs6-_*}T5^bOKYCrp^OnK*h z+jicPNr%UAWtGven|{$Z6g=^AG3afbwpH!en!pK_|93K3-we-0YaQ|>GqtY1@X8Zf z)zUli{#pJ+_+b#TEn5GUhfvU+7$Q89^qYo`|*Xg z$tq70KUBsctmZ!SqeM#p*IV^OsXd>DzX*`^0acZl9hv}O1 z4pU5`ki|D%+Tc!t%lZ!u?MRPTA|vK0v&O5=H>OC%>aRrO@>tr-R1Lo9wWG#$AAvn% z@>rhGGiuKm-jDR0&N5;`69B+HqV|jg<_F4oOs6Zl8_%R`8r;5VeiD0?0@t}AU5+Id zQ#4zJ%US$62r0lktukO;I2?1*^Q$+BR(!R!-seyBhU4W872LlMVi5uC+u_xmpGVxX z!gn|Qq~Hv7!)x%8+&)MhoCU`G=F~*=^@z!ReuHV;?=)uPVdYI7-i^w{ijxG`B2f7%#G&+p`A4_#A&Mt0FDVaZSGt9O`;e9sWJyL+;>e5U?xp zd>VZ(CPujcKQ#C|ey#k*BBJr}U+REYlnP10`(K*L7VFOI5Hl$01=RlrhD$^b^I#LK zf;w%+P9K;N)JLnyMp7al6W2LBMjGg4&fNZ_V5ac37U5QmKYGJIzQJ3Wb3}fT-(55z zvv@Q&zHxY6VQhuUB&6L9{7c)T0{*xfR$cd`(|IhwaXH8`2Ti~Dp?9F%5X|xZty~wh zdAG1U=$CLrvmq*f!|6t8_43#&I5sHWkM^o*aj?fP=x%EA?dVpO3U?fIPMscdtblay zNNjyE-}+|Atc`(k`TMh+*kJ7(oMUt$apA5H`2@BJZA#e>eQXO37nSaZ*7QKt*`(og5kflj(BeY?0Tzm_bT&r-kZYmwZwr@3SM 
zV=t+$%(33EDo)*8MoQ@mli}^7kw+g^P?yS}(2$%*cergoP>kgtcp-u-JQh|EhS|ZV zyV;8d#^`YryXZhVMEBDa-F)PG&U!g@DkAz8PD(xf#6@|(>diZHO-|>qJAV)mNSyRl zNeuYOANqGeMXc&!R+uA`sR{?xSnM)b>Fm;KxVvFsWkT6L?+Z@Vppb<)A77`nP05u! z_~vFO2L^EFuj}we%be)g8$H=+JY3o7m^iplv^gXkG4Il$Q8j;my2x zOB$J|v;x}#b^e3emX=2fx_vb2rTyYe2s$z4E`yDJumW1N@52^FkCs8w-LRBMFym!s zThJ6P{#@y#qd6q1l^vIX>ehu(5Vx9D6epsb9oK=b zMhynevFRw$XNM^c_ZVj?2ZSloi~ku$eqUyVxYpySKSfw;3y|y<1Coib)>WO+3SyWO z=LZ$127d!a^Z6=y!$63!xQjtp8@s`11)LwT!rfuCqI{=5bTXhd1j|*V2FFQKg98<* zA89Z`jsOYv1j6G)3^~HRB&-d?B&^ljWVE{NbDX5WSUf~0tc8TmXDblaLI(l&XmRSt z(J$C(GSrVkfVWv0fETL|!M*}Cfx!5PNs^x>fOz5+soPS3wl5HmTc+eq1VEoD9C8$d z0&t!a)@nVPO{!unMg#Er889OLJFE3Rt)Y`r%^_HWB(+cxAio0s=)mfs00RjDtH;1d zSj(;l%q3TSs8cFC&hM=n^&=NBKsP`<0}xa0GFlbg^kiQH{6dzK{uqlQXRVfb0W`wf z*lLSfUx?UXQ87)zZ!$isnQrPnL&ro5NpImkOV7}cS8zqX-uueE_D9d~Hrox$GCk);gr7tNx+ae2ovNygL5qdgch0Z}enW+nE<@@2{tp*y86?ccQ#H z=&}9%WwLSqPgbIe*ptPwrgdzswVBk6+{#N>9>0k5BD8Y1$q>rRC?NK3IW#o25qiZ1 zl^WtWCZjoTQ?9l5(xigdSTyewtRrtHh|w>LI=*|CMUL+bw)y$81Wi9PE#ag1W8`|7 z|0=tZkhyW5{|D1z@-3UPFp_-7uzJF4`4ignz2#v^mz$U83uE#UqkFUzO?Q|K^!&tX zp?r9%Av`nwU$VbH3u=7XlB7 z#b^J7K~NmlDD&LgQt_ok7WaC(|HKd$OL4Fr{IicWslFmq>HxgQ;Im0zeI8Z0?pqcs;}OQt(uKV#0KQMTpsI!tNX8E>Y}_5F#Q+*+}& z*+2gfINE7^p5wfXJkJ}s;B=1PoqAC==X+2m8Rc(g&nm~NOUdUtOR*38_FxxupJr61 z|GUCGXz%28ZtLzZ0;7!2dPcTtFPFE3slQlZ;V)N$mF@Uj^6b)%-+dXto@|f*^UeM7H8{nGWJg{&{)7%u#fsc#{^ZQBiFM-eDsX~*R#Mofj9OsdusETKq}8Q(PNeGV(q^=R zbGz(=k!zP%lnR?SKDa? zvd6+D*zfdU9@x2!Lym?15nW&)(+X6%3!^BHX;Azc1S{B`Yu_}Uq?S_!v<{L2sxf64~8U>YT zlJ!e^++!-$AGc*^W_Fnx`psI%?3<3i|Fl~09`W;y@2w;~MgMwUR4XZb;}z)q(i7DA z75N(y`ung5gHX@JR1qfgJoZrUg}tah6v z+33!>;@LgN96+%to84p1Ao1s9DT@EA@HUB%0aII3SUNxcul!fVNTCRA%_DYo1$U3{ zijnCrv?W)t;1uSfNiT)erPxECXNL}Hd9N(_0u;l&{_rj!I?Og0=gXRKqd3g+N&!k9 z$`XmNVtz%ph2%c$>Qz!BY7>#YqZ=-uc+J~6R);BufSh@lkll1kUS`M0(hU*%1&ZWI zf+9t`E6u4vb*!&rxF01$KmeTaUwBQO8YGit?p6FUAXlwgO#20gT!CZ&!doK5{8H@~ zak(1Q;)ww1GcObJ3w40BLpB7&#zch3)9+ZVqyM7ohzQV$ieGNLHVrzK96ll@LL?(7 zeo3X5FB#yBNQC$fU}C`w*SXW7(fwoK@NVpgv<4$e$C8N8p^r&855J7Vj7FC_c>%@d zHeBb0Mubi(1Af^?C4fuN>i$9hsvjcpwgw{>y^rY=Vadxb4JuuW0NL`MH$VptNezY> z2EdY!S7uHSu9H3eu{xuVN!?yxLEVz*vt1vPJ}^#kJ(&|z4TiQMPGgcj%F+ckU$Q<5 z6DyjLQhAR&6Udw|ML#4&8`R~*UW2j!Pjb{?Nby<{krekaDdR`zoZ*M-ync&crfx>1 zo0A}4-h%=Ry#vIAhQfqgtc(I0vmq!u6Cna|EUScoaQ-p8ufbpd7+#h3F&#~Z>&WXx z=;-I*l>xOubS(;H%Mq*a%c_lObuGfh%X_fC>xZZ!)?nlng8~-)Es1*S zC~^%(e$|H3e;X+OX&C*tfm(w>E^SG4)HzV$i+v`HCH>l;krjo(Df(d=5vaaKPZ)hJ#Q-?HE$6)2_V`2o)FN@*qpY)-(Myr;ZZTTaK~ z>V9Tz)mCEZ@?{VNkGJT;jNlW&t`j_jF+82Q6pM*Hl;gLAAI+eBltQfLeO@XJF;jZfhP}I=sSN4)u^`E-v3E)5fnkxk{%x zZGp4!A7aY(F>0@Uw>tQre;aA&?Gx&K~sZwv`lk2 zsZDpt@qNdz&NaSV3N+qev;NK?Y?Eg`&9Bai{D9et-0+1J`8$5}bsmGTOAG_F$h8e4 zb|UOL*R1kh(MZ!p6A-!=VqPuSho20n{m_Qp;(A9(C_>Fg{U@^v&Uz@;@pSYIVXlUo ztrRp3Fhd&}cTPE&H&FdcQbwLX{I(n-@S1vTLY;f8hJMrcG=i*t&sdThPpN?2Dv%Lp zSShd&KSOh1>!Vv}&(O*_xR+{Vl!|;KEk~-Na-;$oh4VbTwi9kbFsP5epb*jwqz#yM z`rkZL-d@7O@}_qENO z5LdgtH-zp*^pua(MulP|Aqoz6d1G6yU$9_IkIQsvzR6FI)p8RQ_$x}!u@TW~$}PlL zlO`L`s-`M(`04KRX(ktwb&rToblVwR*l(Qw8a@JZUOE*kS&)xPOnjd!yaRtoeQtMM zfm_GIfi#ztl^>V%UY;=50MDu9gO58NY;*mXkofgT)@rsNy3s}?Gyf4-vE7>g)<%Yg;Ftd!Uz2n{`6Z9!Y5MM@w&%^?`^4cw_$)yx_ZXCVzX| zB1d|}(cDn`(FJ9&{3J%2XhkZi37Wr?hUys3RyMD*}m!_1-9d(m?x$6foXQIj6$ zx$M-Ut;%E522?L{dbF%{>lf$pyOJMuUrT0$e#|+vA!*$`H;&~`HULw+W(g}$Yr0vw zYbFz2uq4_c*bV6K9rI%iXAWaW8@Rqs=1e2#)poqP+P`p%ZJ5JzewsqmpHNy{VFaE4_3`%G<hH8!0QMB=V(p6XD5*egIr^jg-Sq>Cdd53!dJ31|R4 z3+v)2p$I@vBfokP^?^Ln$T!N*f}Z&Z1p`z8jC{EmS}spr%H0xy@j=Kka>I$_wC{u! 
z3$OO>x%<)3M{w~$ARG`v90=JB^f~}pJOD`?ffj^B2Ew62pxOY%fzZVf&;n58L0EAh z5-NoE>Re|abPyuGII(Wx2lGD-<4 z$3jYG^-BK;E@v43mj(THhxg3@UtZXH5Cv6JE3m0Qe+jio zNa=vp0lTj{RRa(iu&B~Wj`RfCA%YUE6L};?k*g+~Uz@*MgQmaR;7x;TBi^A6X~<9@ z;Vb{-rzXV~IJ5BO?}ymL{5`xWB3k(2{^19<47kdhf7g5P2z*d3Hk^Ep&Zh_1$QLy= z`*^y0?d{;t_0v{MKq8~_(f`K1?0L5Z~ z0tq@6cV^Js%htLuidt5~S&31s6F;}}#IX9NC3#ywaUr}*Q(Z+*S~O=`@MEW+|D{;QzFYWAwqYs$lbo*-_dZE3S$Z9?`+78nN?=y zc#Xffb)4ll!XzKMqYGV@20mcs)?!uEmscd@^6fV?j7Nn^I}2>F`M)pFnw3j;h*<1R zlXK3@2|7uG!X{y}XJ?(a%5fAni_z zooX@p`$Ds<8d?7D)z=x>qe}(Fp^)KqT!!MzaP2WS?J3W=#M*UNw^9U0mu6*_j1!e(aLP$zn>*UnDy(yI(jd%U_!=`k{(1N}OI~SknY}B{tH~%1609yD5`Tv8c|3UKq ztTX{A(`qp_ek?tj*?_OH-Y7Ym-GFbP-Z43v`#8<|g9>2v#bb2vr_c8q_aaj(r=t>J zV90^YzzzstxeNgC7V(9N9V82}bb{b#C(}4jx;lYg z)-3*&ix#Px_vvC*i)6X_spB-YUT$nHwUo%sFqu0sYEc0?)+_D8kYW|-?v}OK<3k&`?!u!d z{>Xbxf>QOt;=T|C7uh*xt-*J+k20vS(X7qa$SI>emSoJNbM>R>=_ZDwp`Yba!tL-B zv5_BBdqS8`cf>J@n%DZROjRT`Shi`UY5wxdPzv?$o8vN=#*lxk*5eJN?d_I%MyUpx zm+Pv3F^9;(S)@C4$o+Q>vvCaEDUZP17+Rk&vT>K!o-9{iLzR&MjY%{Ro=)tSW# z@*hXUT{6bXJu}d=Z4I7%*ANq#FXBI4%e9GRpy#Db)tj{( z>yyI0hZE1*j_JM>v_uWv5*CIz_p zvRJOojX|4H^BP%=|Ly%@YhTWmecb0LDSIY4v8b+9nKE^j*H>A$sktlv#MH0W=aXyP z?56K$4L2e9OgD39ljDro0diJM{_3#{=bBZ(^ z!Hk#~-q2|CX=0vic1e&pXKqIx=(GoX z9plv{H1L(YXlO(Ooad5G?n%N7jg((GI9~(f z9ma2$H$00tI6rN4DsG$VFqwHgf~y>KT>t7k7s9S8Iu;`N64_Ur7^|d}ym<{rAPwPl zUf*AxwALri7Zt3Ki~X?=7c>4fTCrP@@=J1k{VfbPN=+@2oOE>U#22yt7LF-sw6N=;Ky=6w_-^ zOgS*rxwLOMfVSHI?w^=FU>4(OA>q3g711lJffVV_h{}~UQ0tX+Q;bICOfo3 zG}G?HElW#WuEeKUd3v;~{Ic(|Z39aDFA*Gq4z5+OneWQjh=L=bx24i(rPGE{CGNe}t@x!m`Lx zxCCx0@8H;vkcYKu*yB8$JL4|)0pfJD`qbvB4rn~XUX*QqO~o&%yw@mG5+K^TktLg3d`yb& zrd$jmAzNuno(WqYfrid19hpfpy}hZ*y_4-y#fMepy$^fFWc;Nt=)L=7rs%GU{7YyT zZW-wTD}!9Gd-tB{JgKRCYZH2!VCb6I6@(G*onFDgv~cd?D?{U2Sb(9F;2lJeTDsI|AZ;VCi(sL zbAipN#gLH?RYRe4{gt17z}Q2=m?Eh*HDINm|*O z^gEZV`arz3=G`rsm4W-(0#i7o)AiXd*mjLUKkIpVkN7a>N^Qv;YfsZ8V(&7NbMh(! zfAMoEr~E>RbV~<|+hBm;xqfA+Tc zGp>Re{{wg?M}NyF@{Y5w@WlxD%~jaFwuE==Yyyw9`r08?cKa>k>g9gW#hL=rw#X;^ zO|6I#u#=#McdQCy;_aluH#OG(3eNuu-v0`L#M{Cdku0B;i~T{?tUxRF+=SOzkyf`7 zG!sR%O}>T~?4rCa+cOD}bH)+Ytiq=#H654xPut0A1}^vY1-s*gY)!roA4iKgD7XY8 z&P$YaShIEjM^C_U*UWdsW3)&F{#Pr@Nc8!h*bBj>h16}- zD0>0@HMmYyJWa&;d|J^f|$lzCT4 zQFOfRMqW~oyKsjWjo|Y!rMnW83+Ew?>;meBwdMR}w(@AeTXSNa8ipA$9E;}W3l~g6e(36b z__i%7GG+Jyjs;c$yE;~2=N&i9w_6Yuby}9poF;C^A2j^5 zgj9d{ndhHmaf2G|BS`Y|%h=l4M`pRai7>+z+*U!7T~P;nF7JSbeW!$eo`v-#mV&?n z?d@!?)p%rGYlJ`HG969Bfp8Q>2iN4vUY7s$@a`WxWy|CQ+Bepq*J zHw>_Y%wjyZ4`V$0qA*`ws*p`iVB+lU#tH&%|13+89`45h%M|^btStKwf6*zx(iN~g zE^qp@_T*XRsPA0>J=4*w9q=D7JaMo-ePW=tpt@O&qxy7jVgOE>c7^Fo1zsnMz zTzuNe z?c+>MkGHN=taaR}wE8iMKWOfvL>AP-=8m<~N!w<_hV?aSBjD)awL8|yud74ul)`CB z4@V|6u6g>}{*`pM(3bZiFLriTKI-izw+R{STns(@3Vup!5p7y|ZrOg>#Ttg7_{8-t zyN^8x2~gz!DJXzqm)%#zZ+Pt%zLDIEyv=U-_wv25Iz^BKY7-gX5Z4}nqBW68Hg9uC z{*019cds0_``t?&@_ydD{@~A~nxq4j zz9}vD!T8rkxw-`aHF~g)t_b1B1Asz~x~TJQ8J~ozpNuYW!P7DDBzX8)R_pLj<$XFY zEhm9}(b4VrkFmqu=Kl#|L)w|jLd5{WCK{#MgGeISvR=VO)d=KI6gA7StD#he)0<4B zWyi#h%d9iIUA=c^sF#U-=ck`#j*mQLPLH;aebg@DuygbXuLw98?cvU39p1YLzjwKS zU-b1suUcW>%uL-ThDQsy1Lpd}NZX5gg+$u1*G4URdvRa4hxHB^+IVm7t{g6O)%EGq#BMN$( zG3qfkx4N+*pY=K^)>K7K7BvO%R+OH0U@kohv5kL+J#aDCGdTKYz?i|@%b%m5Ne6cw z6pl$sil&bbO=?Kl(yptd8R(P8B!5arRYZJMEAK$&bstW6ce~HJ->q8(;VRzKllr=i z@GouzNhf0y@-CVO>2waGQt~X(4x`5KDb#i|uS*BvTHe!>_;T1M(2}axI;lrQ*M5EO zxU{_!C&080Qc0&>449!;;M=QLh{qOoj<*U@OQ%^xrW&P3uwGh_&Q^(c;xbRz!Y4|n z^&oVL|5;+0ke_RwV3ixG5>EpFM-`R{R$i^U1fjrq3PCFI9nt_O3lYl_w_2p_DU=U7 zxwq;m9BLEJap|NPN`kpE65!8p;U~nOgj`bk`L=gFkL*Kz1{sRfUJHDlKEBfvyf)i| zQ=|)j>{%6n_cBnOp*}+qe;&d`YGJrc9hF9f@H16;4qr`nh0gArP&2qxJXDUf7ZOD! 
za7_shf#va3yWcyS(b1J9o3YB8kxnGCvFIsHuexd{ceXHoq+xxL#g^!3u7%nXs*Xew zCxy>L<-C`vMqTu(uLPxq*)R?~|Lju5wl(sQ6;F~*NPwR%_|{tTz+@LeQI#=uwqJVkL5y8BcwD@Uol>NP(@0=cVOvbvV3 z*zXA{Wz35%S=>A=8>ewhGyEn%CAtL<+W%vE8o|2y`G>IKZa781EsvU>%oWR%v7?_u z@B%%gVXl$N$T{UQ@Mg$R;d&%9`-jxBYX{u*1A{~o(I_W>-OhUlwW`c)@L zS>hzAKiicgZOIHKdsJO9G<&`=XvtvYS8+K2iHiNznJ;oP0+m)8p{^LcB665N^Q|yi zftRdy@Kng%x;w;Pq7-g?o(S(oIaJQ!P3W6!xK!!*bwvb*w(1_%d3_^E=G8lF>x?B4 z{gi8yaT~r=rBy#CCZ&zm>^1uqHB*ZwlEm>l%&hqPpipXV+E1dq#@6)QD9pa@s3;XF zjQI$uqQgRq`^)}6?&->$0twQLzw71fQi^?Exq`Z2{Rg*&g3 zR4KP3uKI{qnGOI5e~nQbJ$seiO-kjJ->0m^a? zPD|8D%VQ3XcPLEsB(zw{U-fo{TqDd_<#vTtBh2^fNeeg^M*tA%Z~4At!u(Gv!?C9U z8dl37c>>G81SZQss@7c><>VGF%RpD@S?VZdfJFIrr2&uN$w*q+O+iZej2(}&n;M(5 z8-s{bqBAsg1FqeNoEDmDPNqt4^HRJ96ogQVh4Jo{xm$qXwr}fhH6RQh-~z(x%3V}} zmWZeXgajPL*{K2DCC~4c7S+7|0nyvly z-~JpC)CqL%tF}P!kqK>InbO{TKqcwr`@gX-&lJJ*E+&hR7qx;y+|Ozd6ZD$DmV;}R z6L&PQQ!hU_nwVJ;?DF3ioU|Qq5s4zNYZdCctw>gCLtBcx957N!meUVLK`LicL(^xn zdN5*+Id!cfT%OQq$;#?bBsP=NIrX_Ig_GWL2^5TiPLUT{#y@ zWO;#uc;M)_^wd2kgUQ5fb+MOIz5*#5`&dKA<;F#5? z9=CL_9;PU@wEi8^a>FN7o~CbChvcrMFL^eMf}gA7HZ{7Yst-AzeIP^WgBZQ zDVk^n0>?JFWDVn;vBw#|i?CcAy}i5x;VL;O<_J`ndMf!eul_W>s1~8wi(}m^E4>+e zxNjVLay?F9b7&kE5IWMj5wRE@F}2T1Beor-5M35^mQW)-UA}Is9U1ZZ+8%e4T)ajEapG7og77@c&ie8k;J26Hy8BmjP6 zthaW-f^l>16SBn#&N9UWjuOQ*e`v9H_Fs6m=Jv8)eyJTh#JkWxe&Oxw4E*h7=7BN$76RjJq1e>1MJUu0YQfWF)GsfNxM{WoK1d8*vaNdbi4R?tzM-D*`W!60 zXcH_f1hAMkQg016&}A~lqtcuVH|^1|F# z;DBnY`|jgaPFxuEOUcYlfaA4Uj4a_rF(4FQcyn};W&FoZk$TH}A|}o@8Jl_n_f@d4 z()bJS&jv3qgq>xF>gIqbzX4(F0%34Fz3|pu#=H0igyG#C6IYZ1cpwW&W;XH#3kLzS zBbUs)0qjDtsnuxxDCER~g(Uz!u}Kj*$zb8Ou@^v{rWf9GX?PcehA%J1cxkp;tmKC3 zmO|rpmdJh4d!YY|ytn?U;`{!`pMro$N{EP;?v$2BkS=MYrMtVX zfONMsNOyNhcQ?}A-CXW`#@BnT??3VV;jX>UW9Q7ebMMTYefEj6OC#p@>tO%c?l*Vb zhWz|XKRrBZyTUU&76$<^Fayh}SNV0b&Dd1^(9s#3(URSQ4oy#x z_ROtWj?teFK{c8et%+Bzdg{22l2HP z?TjrkEq9?BB}eImu#H=J`_%q`@uK%URo+EV&<=G87M&b{4{0i!1$$!6R~y0e(g}l+ zr#?|QQdf7IF1KcL87{x3kc}JOnG(N_#QTf@%Y?ZCk9XFWd-T?5;sYlR)bvrMC(NG4 z5)8Wp;G95n!wQ$qzT0`3P?`0qJsd*Txblgjah-~+)}--GvrFj2QFz1T)9izEW}Ej; zWvP_(+Kma1*FJVH@0m}NUcs0&fC|K*G;*twBcCQvgo10o0DhiJH(avfK?3m^wTv4L zTXbD7m3K;wf8OzWhkay(R>Xok1QJTIJiD*qUk@TIH*yp~Y?# zy6Lwwu`)Ll%PKh9r?+9XNjr~@P1*;UxnqlqXnZpKRPsH7;QTofwwuf`P$4^W&`S&!_gHV zl%j{9g~gd0>VIgMkgYf>NHE`dBVHyTyl7gU$Jln5M;+J^v=MHvmif%=n(M25s)^LJ zOpNT%(B~9!@7GO**nBP2$u0%-m2Bu6;_0)TCVl(tj3D#@h~j(*hDq+YYJp(G|xDF~iP^ z(Zi#0F~ip6!tIv)=JPEC1%Pd6OMh-mc zQK#`3+@@tJ-!n*j+jiKf_+T9~ z49O);(vo5BIH|0b321qyr~BsqZVx#=!$ud=#l1Je*H#DCs?gMS(}A#lDbcPS)5|x@ z0DS>Otv{^gdgcP)M&yX;7i*Alpdl=MVk~7y;YD-K))}r{I=p{v5x6?-gK}G3YhTT7 zUx>B7Q4&oeVZ<2sv9)$GO6v7a>R!6nc(kDUc6wZda_dbjz>P(#Dc;t>~O1UJ+p25bZWPYL!;%kk@qpP-${soH}m@`~J-f z9^TuL1i16SnVyJ3>c7{8Or{b0n>&x|y-0o0U8dLB)ANMgmvPq{_E!WPW;?i|}A0?)rMOVmmy`T}gZ+$id`UmW~}7!+)@; zZ|*Rrd>QYZo?CkS(%nKAm(J6zkl)&uRA-ug{c*mmqWR33NPM=c{#-FiL?79EN6=&8a_*<9S86#*lxzhgEcWAaZPiNs*L}d z#eH^}SLxJ~u9~SOz5Q|4=Q8D|?$i>a-owdy{ZwVEc;bUUsJHrz7_;w~n8F--! 
zinnLW#${vyMzX`qlyRGW*L)vAV)tW)uPyIw%GvChMTd2g5oqz`RD5Z_(}-E*7fSmW3u&8_?ZxGw9mi_6H!G)+}N0pmLRuvyOW@C z1}yWJ)Kt_)@})u)Mhr8!+Z9WFN+F=#66aN< zzLsX7j!Smlvchc&Tkn0|d@{Z*cq{V-O3J!WNVKBqigv?sSXYrPwC|p&*wIulleI*c z^F81xH*imTM_gh|^0Y;MI6?@fZBhNm-gMA48%rnpF+~9GH{-Rgi1@)a>_ue5%zyoV z8U44IVmdEOhSuFe*gomn5`H*)6eNEk_@JaenFR%>aVdA|F0a^ zp=aIGR>et@^pj{3Pvf69Qr{?ClLaH}y9st|?5mVqE6|%H?rfBzu7)GlU06NrI@!g| zMwvbKmnd`QlizxrshbXZ-)e$&I8_d7H=}vb_d%^i{BGtV@U#2hoVC}lhN{b7AGe7w zvMm{6Kh^6FF0{Zx-tGOJvb4~Tee~Wnx4QNEEM5=ivoP$l$69=x&28Pd?`P^4w&B54 zLc|LUSa#sg&1gi&l`0&yi(%{*x8V_tHGVh9)GuZ6I9wU46`8c`FtVMvMcwd$uZZqr zw}Qp%k&p3OkvYo_jCu8T=n6$lAG%d6(E9BX@Z`|7D$LRPKPE19vZl4=g90s3@G(g% zvSr!fcqeg-rQw5rk@bh}?-oS;mA{tySo!no?a|}@LB{`}WDxQapY4vWDDwDNDn7eq ziKrUY&AZgyb|CE8DCSZdYiYp1X{Nz9yl%9EHQu*~-O^eG>tjD#l}8POBOHl^XW zcJ??+-2sDUSo6vvo}XAiiKdhpmb!$v=O=a4lZ_=pP@*K^*|-E2%^=u3cnBw!p_&n> z>u2dKdq7S$=OzX8C>nQ;UN(4@ttVCxEMIc^Yqi@%JA{`(az7Xzq0sd&y!Vsq^Eb9) z%P7o|o{um zsac$21wY@$Yb0>2xV6;i6lQO&-8|FC#k|6(N0h`IQxPF!IyB#x7jju=Z*Kdr)ObD4 zskUiJ55`b*Hn3zURuFnuDA-nz^H@79d_yN8adY5FI7=fYaf7NV)_(i@@nY*!y#QokYeGIon}u#0Ha);< z`|5lqK)5~vtc~7xwDPWsHvR?_28K%@#e2^NlU!*x>>XOaY$0p2Pc@x*O8%d)cKPwJ|u4Jr))QD{!2Z>GFzm|l!ROxA5^+wgFFgKmn>uCBY zwsir`p9blo>vaZ2Vypqh*^y;JPtGhYFW;DnNnziRV9C0X9%b6W~@J7bD ztTZH5>5qI776?J~F*$xG$^E_m5B>26{c$utNMCvo_*ML7{QG#C8>Z)JkK;lOO~zJO zKC>5k@Lr`oX-0RSA0rXgCXbcuK(#%YPiJ4C8xj%8CcafTI40Na>p^xo%y&`$p%1=V!#$;X9xuVw5iJ23iVK4I1Lc8H&WkZBCz^-#-ti$TB%#VeVV_&dob_P zdcciHK&t90PBWu$wdU?jT$$8(wg|7LCX?1pofUKEYxHlmF+($M3u=MM7Ys@3CK$Mj z=Gew#&^Wmp?nrm_S*LE_wF{E9W@?#j+b%MDef(>)fO3W}SnC|iEeBOw_#n8lfRn^% zb@SfX3e8jeq0l9`s!b=h`q`irCNN zL$LqT*H(ERGX-``{4sURimy9eFsUK+8YN=^(_jZmRuRFJm2W(6-;+WGT8kXKzS3-c zv7^~Y{X^X+6-C|Efc3hssMyc$upp<;l_v6a!JQ53nAKl$FQD_yLy)!(tL_?OW_p_x z54UxQm!NUgW#+Hs%cu?Z_)KD%cow$^HRgJc!y&;X83Np&!!)81zrC}&SpNQrqfB_ zs@@qRLq~RlM3aEA`k`HiH;&;0Mq5FE1Cd9NI^!ICBv2PKhVqTUM=6|a;(grljaW(% zbT%pbPW`yt&*;2^Z1wg@6l_6Y)| zhzlr{!yzNerTtJ$_9n$fnEdD0v~Dg+k#XASR{`&$Ur|j81ljW{#(w7chP=mJ^F5`T zKUb6wLo#Uf4Jl-ZlN92CN+Zj*ALL|~?iWWjjV>9q%MguO#Fi@>hbI}NWGopJNE?kQ zHV}>JtVRlX9Zd$YkiPRO`r%ZF>rJf3MhdYIx$`<89sH{L`G?bf;fsR_#TQfAOvt&S zS}!GoB0;-@*SVsppdULMemw(Hh-RrUFNboVlUX3b!Pnp6Dg$OnMbQN|jh$&MqS2TE z@4?UOXw15%>KBkvNix=rVccBN=wIi?p%!LVMdSzATIHk=zn-5gTG5z1+~s`A0%I!U z0Icsu$U~LURj253O+%peGVI8;7j4I-w`Eyh9aiyeWY+qEJKdr=)*wVqJ}LrLE)4P} z(Gn)O4 z%-N70SLoZ@QTM-}QXpjzw37-Sfz}+CAK#Z`D^63y@gCW+%hvtW*0h?OU*NLW|yKF=5(uuDBC$CO4(&NSA^)~UoC@${a{V#VDo0>RG) zE#}X0x1os5{H0>ogSaSMCBM0Hzd4$XdWw=Y|w(iU?!>JDMJB3n|Bt})HdA-b~&yow}%^Ex7 zH#kK{8m~j?YHrO~ul}e^=aP7Y#H~fJ%00A}{i-ty<>G8e{Pn#3(R#@-@D*dnjdx*!1k~Y!)Dy&A2=16DkwGnp=;+Exb$Bpgf%wT6nnjEu8 zp7&y=Ba5`WhL{qQgpRzSKEJkfaGyV-<)_eK@#TXIKE~fM;AIyXX7;7P^YoAY-Y4S_KDGrO`EQ$;7%uHY zFI@yBUh=8S>F=47FEm}La8zD(^cR_ZXbnza|Bqr=I9!7Vk*ykiBsiU2n8IaDIAVbZ zk;`BYeHjFP`~xCEfENUgLEz6nAnGj(ve&Z1IfdHTf6-c)emz5k*s;d?lE-N;$g3QY zLwm*uKjQG2uKxolK_C_czJh?h6bMjPU44B;%47FWL8=to<)GoimyiT@QmWxU@tyAfOEbP9U)U4@lDBLDb*mK>zvA1@-WsU*OoN z8a@7>2Wn8B3>uh$2D$${`~y-J9)uv36~rMfd}EsJNk`B(IW&|j)DHm3Er5F6ArFwO zlk9H;t}#H;d~<)i?!22=vc~qsmQma6#dpkc0nwW|EfU8psDc8+<6Uj_Z)j8r{55VpO`^y=S`FKeauvhZ_qt>Ig~LeNH6SoSp~Y<2J4yg{S7+ zl_=*5dU}_B|JI$`esrYS9VAHoZ4YjAGGCND1MLz#`tL4OsppSt*_)&&nHYG5CPh@s zzM|aa2Su&OgUMbNK4g~FVl2852lVuIS&~GdW$;yQB5|Cdz2tY9yoKcMQi>%3rU+TL zk@xD3;dms=Bk!U4QSs}FlPQ%~Z|JIq>@7E?72i8!s6DdYdtOW$TN-5ghlH%W7*K+3AmBoL9hI6t*%Q+4VNRGDCLZ;%q zqcm!&bMlJZe{*`eNtiFe9OwRK1l{XDatZ)+Yk$Ti57jROVE>@C%gbOBkQHX@2XB99p_))chYwtM`K>)NMVCympziKDQiQq9T$P@nAW=zSWx6wsbNuR`>?L29i<+QZXQa4{zQm^= zVG7^2neG20$ZQ_a+5C@LFd(MI$&u8RM@-~&sy}pj=Ob#sA&Sxmzk~teZQyHwS8vT?s1pUX;y9R}pLO_>@~D 
zFwfQ%?@n9HLY=3J!!nCz)qV6GIIRw$>1&qC>Q-*$C1sP7L7aChSc~s%i;FqGDfiAf zZ8W6N<+L>|Zt>Ob#zc%T^`&tLYy)8r8@idlG@HGRZ_S<)2XpSLN2hOi;!HQbYaJ<9 z7sg$olElwQXE)joAEp;~*GH375pKZnEs8L9X+9iVhT(lL|8C|0o`>=i$_g15> zfM?+Oh4KV<*~keCV0L&OQ_{56$s(6-#blk%ZSzr}Sgi&uvSFZ>8KTolIyf`a=@zPU z4)c^cI;|U;%Xz{Br>!Y=*Im$LJCRaHvpH!}Mdq1V?|9i~W}arTti5kdEWUpWv+|zD zn(GP@G_Dyt%Dp*?*FVaf{6e%&Y!McWR~!7Bt6mbw(cHekc2tSOQJ$Hb+uh%!$nTV~ z_!Wd_!-bI!Z*$jx<4m7_3gV$Q&6j%3XC0Q_BuO-R%KmrYXF+Xa3YV6bo!?vNuDh0> zL}6kj`%0olT?9C3J;k+~nzFRuZoW$?VL9Cx?z?NGg`q5Ml=Us+xy{^57}3!pv^##+ zWigGoy)Rz(GK+~~pPeb^+*HfNJ2f#GI>B|Yd9}2ic4w`8n`ilLvEpz3u9(GQH%Wt> z7IyAMjS%!qwMcr0@Ba6Fw1HS;_VKfH>3!qKQ^&ZP_Wr&?_6xDlJjLI2*G4DIZ9EIN z%+S?>;j??ihiSptsiVYCCyuTh@=w>^s0H3!Gv)YfwRnmeSX|katD0ZFLr`)ZGjqB; zs4*q?+Jm~9Q34K^QRwCukU(4y6c^#0#?h8S^+mp~L1V}N z6CwW3h%|b}l_Ppa!J4#)#hkRL#ID)Y6%5Mdo6G#sH`k``AOpQXSSjh(XQiMNVI`qj z-&_+5(xNac(jtG(=$Qg5+Dg|cVI|!Cj4eV*3tIU*qsBl4PolKb!E+2s&X)zAY-yst zZFPi{H?LM4M-Zh}tPrH0SJ7jedtSyKe)P{BLzX}Tq% zXZGwtDiuBB^v@d(C1>m@kBwh${^XuL?4=Y$^6rC#qCOUff3A zaT^TZ0;@=iGQpz}+Hz6|Y#=Q{0grCnlC-06w)+L{`9$~hHrO28&|{8pc;^dTT_2^`53}77(;`oOnAFzR!xi9qOIJcy z9m^u^;ecyY`me-TFHga(sQZq^{=4}{XiZcN(BIYt)X4l>IC)ap)?O8m&ttLhw!KPg zk+mcNm#IyM1+}XEH?;F8_$A7^4wvobd z628Rj8VGWjpYu{Xam^fBQ8^ZMUdmx|ktJy{`cYG~?o=YBgr6sN6PR5RP8cE^W(~fw zEzhiO`ZZHM$O>A?hG!q^tK(bVtL=uW!H+qE&uUOIZ#Eu8RwK7ihqfM4DSNcuaXX-+O4Wc-kPu}km(r!U@^3ncNjfO@!V-wxBIB%UduPEDCtK0M|YM)*cYk1Ma9f%R9WUm zBTL{tbx7CVne(LsPJj=M!e;%m_X9_xuI}B;K7v-IW$+2h838ZMtz*glnYmuLr8u3t zz3=Vr^r2yetRdQ^sYTL31+7lEwM9c?k+f^Y-YoHoVSGm2^z3?2Gi@iqUT}7TCT;f9 zRFAHnx|+ux2Hw-L*PmBf>vY%NM-mkLSAw07$4TNH8d!PIDP%+8q(kX1nd;%b`DgJ1 z^~w>SyX2{cPK@1> zc;yg6z^6Kl?W74Ir}Q(u;abOYvG&HCuGL0xBYkSuDPn!r%=-NN znc(_nkxd>#apmbkgxygbs>gg>9`C#ngm+$&gZ?ZqaNEl-J9{%+sqM(ENOnH7LtgC< zc|n+d%ne@UfFe2G3j+Mg0qi%7mp>5Rh8Xc?M(f8&=3j+KF~-Wt8^jz{9R(;TRSwL* ztdy*D75v^w9wWPlOTdUT=p^XdNuG(~X@pDYG3c~Ux%e|s_a9o7o%Dq*UBPaj>J8*P z1hXxd#+y0wd_rcGMwsOx??oG>%eXzt58di!gtl!4r}6X`9)m^5*Rg!~`&4>~*-0a+ zSxMktw@GO^!Yn-6fw~U($ZL zu4uwaXFb=WAN70F5B>MRP8$6Vv&cv(OLkWu>LxwU!cJ|=aELk{n-{jfsamDnG@40E zTuke#zV%eJcqvBVp?F9X6#?7jDscZ|qrRIX)Dw$A!}(1dCY8`O9K3 zK15Q|$<&VH-yuX6s0RCr{%qBPbyv*YE12n%6)135x^Pz(VBzLaZRUL|RkxO9 zIs=!wIQ;R4k`wHD$9vw{ZegUQzDzg{W`6X-j zu-xd!$k60E=ECGirp?^5yOKC-nO?d~Yn2+^rCfQPo0Jp*^D5qxCi7yMfKXK%r_&W{ z?}T_S?)N(B57o|d>vSvCmcCr%Ei;{bCmtBPYhHgXKLUZ*U3u4O`~*pRbtQcSHH?JGzVU`O6HcbMcG-33ARuZPf?zpz_W?WdTrm zrT|oigUZapP3pVIAXEO2!Tg7O2AQ}2kk35oyTnGxVc<%^pAHoxsKVYvnG%0kBt&JeT(QZh0r# zGvc@BsfYe3WWa&orKJnau+E#(CV^+@GF4{^(WHQ@oWbabKnN#00ag9Tay+A>s=@Gx z&LCS7)!fK(q6WL84k-8jCr7r_4PIt&G_KsGi2?J*nEMa~4y`M9^{bbAJ5vO{Cztqh z-hDZ}k@n*VxWzU_Jt+!4{(TAX#(BWC9M6yDFmkD|IT0( z-DIHX-OLZp7P7k`(x3%4e@HdD2Y6cU$>v5#gEiRw$v~aZKPh^mU@|xwS3(Gh+Kc0D z5997W2{C3B3{FeKe{V9jpaPqH7gs{IVF>F{hX8#30Q&5OI3f>sC3w(#sJYN(ZYzIs z+A#Hy*DZg1>?;nf*N-ed@sW1lN{&1HeW3cY*HBO~>E2c?5%XSLd$<0}x6`Mp##Xnu zN<%da{P(_(|KD<+$<)pe3Z22G`EoCNf9mN25%Bx;X- z6eTg3sn3g^e;fM6t8tz_JMXW_WKJEu8y+;zqdEU|MgU21nS$6W>q~+nYc!)goiXJ@ z3d<)3-9^;m2c!<6%&X;FfvsF3z-g?e|Dnvlu7#1TQ9t>ereh;^vrFQ_QMlOt^C3>% z_-eMa#++>3*`q|ld3H##bB5Hxd{M)aP>vtlpSJY(tml?gzYFW%A9Tot@UNskeizD`0FEjW7{v@?zq*xv?LGlk>lehhlg=a|p-d*!{ek;rcx^(!$> z!Nvw0D7C2Exmf|_akvt0$($OFy35!8v`;1B>D-dC4l_7?#QA)iJc=t4!f{ zfS9bDyOyNmle8>WCQUAZo*KK~94%o$(Co+~3w`nfNnsqb32mI?+hXNgF}iPy3dPD- zytIqmTeIe9%L$f5Z7R(LOLga$OvdVxIek}5$ITpTB0Pzw12sP}4A$m9+&Zym^qJG& zaYuMRJ!25*PFnZNP*;*^_ZN+lLxMYn=7=}N@7`q`B&J=#x041o zasLg9VBBa9ig>?ZnvM)HOLmbIMyo_6*Zd)d`jxtYAX`@4lJ>Os8tvQUV~8hvE~eRY zPg{!C>0-Pkj_`0}kmGP$n%3z7EXII#c%)~>EWTHbYC>>Oad>^eu`fX?uQRy|M3{R-A^hMEs zbiHpT9&|w!72p4Z*VCh%_{jb{&(VkbVyyakv(AM;hVv- 
zPOBu}@gz(n>9ED^e^P6qLw6)G=#XFu6kNG@=jxHm(Mo^xc>F}AUw{Hk(6SK9{JW)^ z1SRmf4GGT})?q4b;bEl&+g{)s2W;)&;PUB)%s6083=R=BRJn0~R9N=@i z)Wqj9cw&5R0J+-h!ziUrVyS4A^Mnz zwpMxo>AW?VrRYK3uNsaop5EZ!Za4QpmCl@mRpj`M0se?>v@YH z_13(%yW_L%DK}}wd$Kc;ltYJQw*fG+21)ilTfFfx=9w|Mr>_E;2J>*-8p&r?7{*CK ze})o^o7{Y3BYbZur&eY*KP-t^(7f5pAzq`FW%YXaHoB!jZ>4E|=d2O@xPiU$i-$4l z6*OD)T0U2Ecs#Hl6uJ%<5L{rD#Yg>oQRD+Z45H=Ja>H0E-=#wIeAHWS-H&U zG`s#IzyvtD1%4s6fhC$c;ebWF4{mk5t|#--wBF*ywPJ{Y;6eD7U(%vO)hzZCV#vu& z^W-UJ!-Tj;Tb4n;f0@tZ5x1?8_16=Iw(Mb+xj@sfTycr^>gF^0cyNthcHN*w%Z$?o zd5L)!Put>_!naf`n20T_N%j^QaP!#Rmf=+Eq?P4a8|muyzuHZI$p6)D@DG&H>Ft$XX=B?aUTKjM7-Qk~``f$Ui8TH$o3mv<+cGX(*Hm-A zO)s75ZqYKPtGaK&TxO_D52C+frP15*q4BS>arr(lC zQLg?#QETk|Wbg1zXBb*IS-|+dXh8*g;110}_tq{EE04D`-LW-~%dUp`mlpTSfuA4g z%1!b=kQ`I>W)d>exXr}ixVXgPmZT-&sB^{ir!7!Kq|bLsKEg@`nY&jc$CxIRk>3(s z9y>p}L02T@k}C?Dtl6fzX$}s0^#%z)U*(2(VeO1OCM%i1$=#=VJEykO3byHm8BwWvFUZse=WHdey}kK5im^OSx@6^`-Q6c4)K z$}&`XKfM}gQaiq}aVfMXaq03MO_+dzm%?S^N#8xShd8hQDiJcP)y^dQaFl9~M&Hgp z|Hs`(BG2+S$u6}%V1l*)VDl~5dqR#ER$jO)2ZO8fu=z}wUxKWMtb;na+Q@PUqa&R` z|43Odgnb@?4?gkQ#VYAfBmsEe##y;lP)e%?&Gv_|IQ@WB%y&j+f|O;VGja=}-+x3| zxut(?XUvT1a_k-_pnW8x-;au1TaX0eUlDr`MR6AFi4c!J=^^m-H5n|Y59+$YXP7}C z2+rqfUuh_Y@;`i~Gt$2NovSC$M5)}KFR)o81B6ZLaQ`9=HIM*;c_f?_d)Pabp;vj}fLs!J(#vi9VPQH+eX~ohxjLNMC-h~V5 zG``+-^xngYe@5WB!?Fh{cjcBCXsnNVd{0Ms+!*)LC5_Y{!X5J`Qq;K8kOp=^`QMW$ zTp&&50U88IP(~(Wr+Rtv{%Tye-J*lc6DAlb-$_#U9NhhW|zgo$+a!vlW zZr8-Yl>6ICgMqjlPie9FujYgQH!{^xTdfj{GwTZGv6}i0SjKASRqz*@{`fX}RehNc zG_JLFs`&vW9#SC!<9XCf9(P###4gUAhi@F@qOR5+l9Gh9OXLrnMdjOYO}Wa&EzobZ zNH11X3wfToD5C{rR1yKT+V|CK;fzlwv{hq1qSeO@xe`4-${-apc(!id>)^yWx+={o zJbPojq3|bd zoI}=4IC&iv;=p}4+XB3q*GM6j&8gROv(ni+VWY;O=U0=7n6ygI!tuxNU-n^2RvJg%wsiX6%U#pJ6Y}p$fz33IPWLr}mJtIs00V9ZXo4F*ssBvp2H1ZFv zA<=baX}p|C4cMsa1p=-r_sS=tHaFmmy+y&AB}o_5C#R3Bf!8-p0BV!Br{MU=FW5=v zUyIYsC^$*y-$Tu39BuF{N3E72bAF1yqE_k0LulZcuk)^wm8 z`oL;R?pbbma0>R<8Ha?iNe0XzPRGsJf{w83{=Cb3`6Eak8d3-^3!Sd#FS zI@>n5h797b2A?K#Xq_#*hmHf^f>$`XIK7>`lwgfgu6*#j@2uZ{fSt+Ww$^C4`rh+B z+4|ciPpE+ay4Hr=v7@eW8&0CY|>sDl87b>78@*@R^3TohtP)33gYk=-HB0!ya7+_winKANsh+ehYXPDcKySjOsp06q6y3GB5taGl zDHk{$FZha&J~73N(1b*C_~X-8&A>&JX*Mr*68SyNPs#`#TfoXz9Q&rS?YQZ(A3Tf% zRgD?o<5iTGBjlMo02){UTq4}$fsEdC5~%IjkE3fnAD=PUS(_g)`jg>#E&ss14u#OY z>OB^iZa|A^?D>p+xaOEU)3r}VZILUc@0>64}}%yN$uKeDVL?LYg0D=Bj8&yFQu3EbX&iOB$Za+n#Df~l z_mBI0JcHvNmnJsKN{}F{8N#OAw;Ti2G=7-2LJN}{Or7DFwoHus=PpYiGt9JqJ{NHM zFf9_)K*`SAnx_exm0>_udG(3FEsplj^QSZS6mTG`hoImDCn!H6^FYrwpl34+x%b9Wl)?-d&v+()+Gotb$y+GjUxI!hlu z`nGWWeo-ll+hAI$QDR)GB0yp+zFD+;YshC^r39XEPck$I zU)a{D_=^vqtj;{1?RDSi!NDB2wuER|d0e!Q2l$%cPmhn!n!v`{*-1EXmT&_Nw-W;< z)!d&~GL5apnT$7vQFb5-1Pl{CD%`KB(+Juty-be@8zmMjm(0BmfXGUP0nXc&Hp=7q zy>}>~^K>XD=6%^+4?`RH#oE7wq#i8Pic9y<6(cFd2PA6v82UvJw=06URU@$ujnZD7 zQhMPo^n5y){PM}YJj1@Ym^5w4dGh|WM{(o&5V0@WTI=W8UaKiAx`d(E{ZxFg8b{}s zo>k)r|L+{AvQSh1gX>SA7cK`a4q`I@@G=Qdqp0fK6)7-%)mScm#%TsV%@L&>B^qzG zfG;KHrXVBS6*2D^;^!q*80WHL%w(*S?n{4)|4u}R==n@|@+&mKvhK|+9kkn&p~0(f zvouElVT^Z4>dxzSDxaOpziFrD7h~$6nQn{x=c^?S#8JyV1~c7~Av0Y*DG*R09JLIh zHPa;`%W=0hMH;n4G~%eR2J0br!7&Y3kn+Mz_wN_st0fQQl=;_A;{phymQkYEww!MKH+ShE37Jo7RG;N$aim>t}*@OL;P$X9JD*M-*E^99)u=ZQa zO)|Hp7YAk*jvMl^h+@)4ZcOE7IX^8*nlEtc&YQJ~n=j|rjX|a5^Q2}in)6pOXlC)g+7 z@2`dA_4kET>~tQG6B)%D{VYckK7`?yucwV@cV7J6qqAxB+iDnBhS?$Y+h!Jg+I(G} z63Ng=^IF}m*GaoGeOo%R_pQ1es`jAH9;lY~;$c&!he+K{T}8W7j@=QdL8cd9+m>D3 zE(`?fSV2IvA78t*3x62_qHb3P1`0fE^egEDgDRGy>&L3#P(0NjnV~V=qLD~tYOs{{_@xIZH*B} zB!X-^ByL48nm_eTPieZU&KAR?W)lI%o-Da!cX5N%68NkR@5e*R;vAB?%+L zG|ASZqQKSD4e*&bI$yrzk@BwFyI#}wzM%A`T`Tpud+De^?GajaE4?Z&Nvt5Lt00*X 
z6@9%~ClvZ0X&6GTtVMf*fEZ#AZdmY|3Bsyo2C7DVB~UT%Fg>Fmpx^+VmFAOy+(D&* zr=Y%oz8>(R!G%9uXQmFfJV9Idz;30F7T0>m{repqDtmIXp1VAJ33s#Z(DFA${Ep!h zZMCS}9bUxc9?rw#u*cfW!z4=2tJIgCszU~RH5MrlA|8@oy%|_4YAeZ04~EvW7$ZjpAwJ;yqH*Z>PWL%(08t=oq-_)JNcsbW}jJIJUG_?%_%w6Jh}- z1h!M<*8@%rNF5Z|bRXc-bicZOW-{StZxJm8r9SjwZP2%0_-7{D!qy8miG&tXo3QKD zFCs|plcn*bwSxseI+6{^{K8Yi{>@n1h`nP}!hMA$sjpJtkD-S+lZjTk^3toI{0M_i(WzgHY>&zkm zDd6@MeB=qr*U|*o08rE62dG{I{0eyYEU|SEv}?im@`&W z#AWXvhW8mbLfIq*=*_o%QP6w+Uz2 zyFCuN&jatyednEX=A1cm@3P$8g{7MD#2bjjvmDh{mDE|~sOvkJ zlyJEG+IZnRukCV-`k}NNEn~ zl~)z6GarGic*)8APiT3@(wFtaI`4<@2el%y6m@Jq-UF2hJmnb}2}v&Jv$i@u)JA=d zXZlx@HLp@=YiINg#!rXL3Mc=Tq)ZXJ%h{nWPuDCqDO(f&D6tarPRh0Om?janZOaPm zFf!>mrU}9m%Hfz`TEO#i_{Px^o=J1BoW{W!+e+i?8C^N90B-zZu1tB;x0zQal46w| zu9LU(5;ubLux(57LMS`GW`P!-I_CL1ytFt9X#zC!{BMkQ+P* z5W&m?8&yVN3HNM%@`a~!L$R|VyhjnijX|C=~g z&#CWJi}+5KVOAzXN}z^&uO@>QEudIEp3P|91JQ643+-%mj(8|elUY?se;D3 zz427dMnC>SXi!6|ZAxRFdrHrDK~g^=Idqm)P({n-m?Teg-Ba)=+A^SlYscYYHvXY2J|g08yaHxTpGZShVaZzm*3ZB3*kT?(7En9_sfA_jkh%uG5V7oaZ8+R&a+q{8H2m&ETU^qPWfWm;9|t zeJO&O+v}5lQw(4H@=Yc$%FIS4l`MbRI~Ft)t#aeK=4?(Cb+g*8=| zcmJYlY)_J2=wDJyyHfhgQ`cIiziFEHYvue>RHj>;x|dYpp>MnCyVuZE>bi*OSAMuU zBR{#mN#a`muT;HtSgLrPT3zGfAuFT_7L#4ncGkRiS8597DX8@OJ;PcwfkAy)LnL~@ z#b7pN>G!f|FFho~5Lcp>5jBS^b*W0b!!Y?@xi?lKX1(%nS{M!e5e+@=U!6~bVb{r@ zK8UGG-FLY}CQmz&{s~T1@*U>zWKP8AX-xaP)Ndw%L79kOd7oyo)fkOG=uJb1^P=a7 zjy)*+a|RaapB7TiZ=T$y&(5rS(YI!Spm`K`uw(a~0nW{D;dGl?7t7cl z+i#D8!$0FcA^ns6g!7x^Q`2W*sJ-Y!_|f=mEHCM_O~s`ucD{yN_TYy7d==pHIp#fX zC{ZsO5r;St&r4+@0U+~wN%!Vh=*6p0Ag6yPRblxJ*h2yKo^u+%N;D+YkH$CB^HYg? zCpxit8kUWH`X|Q0R7wTyCc5>-9HB3;@RWv5oAA99#VY8==QL(;HxHHT{jXe<6M?Wa z)>0+;Xl2RLglX?prW9DaXZ_CSUy5*{VO?fl&wbSdc9Gs{)gyhcdB1` zvq-nz)_GqZ7JPRH^RL`#Xa9v5Q&j89Z%e>+OCFfMu`*1~5YW8xH{j(I(8`Sxa347R zCG593=*eovv&ov+mDcVdV270Iz4gtQ$nKAHom|$yRn7OPC5=#9vFz7Pdljm-hb*NN zAIuWa6eoxr+`9C#$K}*y>-D!(FS=9ZwslP3sEJOgZw)EtrP*<*HHKlvpVo_fywnY5 zei4agO=&L8@?yHdhVpJ&i<{ugCjD>6&@cyk?Hf@)^XXsPwf1KJh7N~yYpIg|4Q6~F zPVp=Ka3Ek^=4@YAxYvItSF0eXcHz_~7m-Z)(O|SVJ|z&%(uOA^*CY5DV*5y?SG*(j zqDO?w{C1}N{y(NGo)XUYEDFdgA*N(cW-Xk5Z~ghL{RG)0ram*j{NG}8ebI0^YmD{T zri^P!eqZPE>l1_|jLBDS36YASLxkdU7qOT0U!LU(LAt29=%~(8CzIen@{NRrCPC)T zpz#(#`lhl}j?5}Sx=xMHw{arD#B5Z8G&bkAfBLe~K;zr>vu&2wnJenaH_x^^C#n`+ zaIp`Ra-U!f>zj-I5dh}+i9we|Nr;l;q5AbR3oe4p6}^IE3`Jbfp!ju+F)R*-hl8&C z<^(0jDJ`J!dp&)$dVNC}uh)K;+~_A(zInU2GsX?rX9?XahAzCWc5fy?Gyh>al1+Gi27 z*@$s{L_OBwg2_5mhNwwVqt(jq!PELlXbU%l29izp)II~u+Vr;XSG;@?8n!=KtYp&0xwaqCpE-zuP z-X}HXb6)K!zXzddzlU9}zzYi>W7p{CtlvwF>^@ijJ#?CFf(sWJoLJ)Sc*d%3w_B z;A54a8pD1t=&&L$bOYfC6>F=h4grMlBY|smJvLm53dXYI=QM2mze1_4#-%!l{UD9* zBrm<1$H!`0Jj6m+5F6$pC?~wW`*W34i4qXCJi_Z3yvKRzd7yE^tDWHX>3=_dWqIlT z;NRSs#~MW)$RXeFFic6e+Y-Z)H(7-+M;3ZAD^DE~ml27Yj(%itLsouTPX2Vi9<=lJ z*x;U{Lxl6RHu&@S$F11GTDx^&`N1!?13iA4=1k^QV)l;i;Ib6zH{G%pB3hQiOP+-S zhAUMk_uUC&-?cosi7gD`-smS~aiBEGDE<0Hl4)+q1WzN$_Dm=(SoMs3p0e%jF3)Zk zMQZrQ&)&n8FK6qbz%PjQuG>2FQqb&`xg(d{8)>Z5-!cj(Y_dU+*Av|kEyBXM9y*gk z>MJ?3jejYex@Bbjg;du43Su^KF?#I z)*3TAVi{CkS-JddmpUWf?T~DAUV}h(h@h`%P{O1(WlZk;SAc&Gnhc>T`{&kY`xtHZ ze=)BH)uQlJ%U70Mr?|wYgc{t_FYux-Acsa4BHz7dbR1Tx+@?$#g3~X^+-OZjg45A; zj*ObErUDDxs|_StQ!nVFFBt7V8jCzPZ4Euh*MS+|Sxq%7JCy`yt!cA==pt>f&Rg1I zc>_TxwSr5O7nR5NVvf@%FVCJ9@NHp;Z(%Dt4AOlaWNiqo?wF2s7w_W04G1+cc>1D8 z7=4c*zNMH@p}gp6c@CsRK=ua6 zjDT!lq;PN?NbBE8azq7&-m)233>Y-c{L(=XQxZTHX|Cmu5#D?LWLL|Bi0N<)+=$Sy zaJi^@t3t#oKj+a=NTT z`(-*1`cII+WftT>`S-NWZgrlD-pLbW0}Zjn00ZiRKS2XkLS=Sq9)t=+c`tgj4F z3q&v##=H&CHMP7ZKNxz3)i?Dw65SSs&FvT46_u2W*8FPg9{lT7UIDpH31ls_nH>6g zg~Oa%roZ>AE%J(euZi?>*H=zY^=_d(>#};T>{2K#x6IwOt?3~&Zx0o%ewRFwvedrD zwye!bJ#Nnb{H1T~7TtwHGjtDGxM>HO{CX#K(^d 
zZmEB^gYRF>#qQ`2i*5({uFXhMlLt(cnl|&9%4k>3rANvp;JMp;;BB<=_+-|k=1zz0 z)NV)E&c$l|uRS`ykWp#Iu6-eW^a*!^g-^?7N_?)6(uK~Tn!4Ex0MUM`L{-|-WsZ0^3jbP(eEy9mq1t!JLhgC#zn&RW_*B(se~>O&Y0BLQT6sOjg3pXvY0I(~%P!8T zrh0x-PqtuJ3(+L=R%3x8$C0FKsOQ^gPmUt>!B~|%Y2M7rF#bs3Z-I zByEuj8(9DwtxANk3WjM+ND*O}qI#Y-G@GR>n^1$9sBlD$?Ne61cIP8O1c*j3;T=hh zP|3GR(UKfAlAORmN&s8-qXBH;crI5FJDnQlzbf&@Dv`k07htRgWP_FpjJ*J3d!@lx zu_D!QB;|Nxwy#BmCQsFKNXYRZbU(?uw2^v~kdPrJXzF<+AAYiQX+PCq?j|gdRO5CinP34u}8P&(W%j%)x-pXeWmbh<}aSv#yKGg(=uKqTop?B=W@m}dwBMr#ennjXkBI<#WavdYhnP5*@Mq!GE*7^O$uIZU>MoSwJ#o({J z$6v`MlMjfs(?ZIRTMRo&b@ry68@EmPDPrgEUX#B}_Wd%Phu|w$Y|~QYoQ<^~Q& zH(NZ%VMnH)G`|V>>vccer+L0X8O>Pk#6Lh0FwAKkVMd$7G~7n>%l8{Rq1bNh+9LSP zkjf8xEpmpwd@vXn@ZaUBXxg^sN5iPg{;B1kcHx;pmql}cssD$qr}pD)VNTBO z>wbvCtW`3Ami|UI5>I9dJ`I?+pj%OspF1lMrbrQ9;tlN!t z$mLH6$Q?80mzYt+ETmC}U4-`arM$H?hh3TnjwHOV#^JwP>o5f}v{64ciC{H2ENZVj zte>6uqA{{sQtNR&JNU4en(gah%s(eRCy5km0{<#qe1{x`GJ_mN?27uZW88D1V87D$ z_r47pOYA3gH(}#v_;>ep{-~(Sq3E0iu?R}Wd>odr*g4_x-@(b>VMgQuiKu6HkdLkw z*}ekM5=gM6>0@^U43QR%40b3^t*I zL2mf_Bg4Fepf&58pIwTh8{~$Q&+|LYwdbkJW)jw*{-@RJ3Z}OXZVQPoH$>(j2R^ut zhTo$7HD1(UQvC77c>jm=_1Ev3O;0ed%COit-{4kQ7XSTitOEP0n#U*JDEa0-Z4Kq4 zW#X|%s9OlJ(8!zkBf`17GQDRBes%cT=F+qk{|b|TJD+VK4KOjT-2H1O4R=H7z^b7X zgl6fhOcUGnd}_0_sW}*NtHMPQ^!7y3ueyGspSMPF6o=eoQ5JdRA{LEe7nMn0Ba*oE zp?ORc|DZpX6S(aQ`w=Pr=kG)9SvkztDXHkZRE3*Zw+}}Kn4NX zg`0S@6D6?FuHp7MR&~VX{kR-%0MZm_d=6NrD1QgTc$ZBo#$-thhTZpb)abG$MYs)6 zg8pI-0Q;T*Qp*6$JN(>+x?yK+Lod^_wx5+EpQqvJQF|%wd`1&t`cea?B)G)--OZ9{ zM}X#>9(1~)c`2eB;)*ay{PRZ+PXhsc`2^zpa)<*U86AKfD(LKb55pLp1Qr=U>^qu- zUW)%YCBsgzfrUgH^qw%_`f3_LR4D-18G!QH2g@FZpbP>p+`tTi+re)Df#TN&1Q+S>}8^R#0HsT~0$Wo7`;q+W_7#=eQ`_~2lN2f&ORfa^DOj-C9V zvEd9rof{1FdK~@BiMpo8?Ry46w3Q#P&iJ_$(fW;cL0$C%W2j1*m0c1S)QcN-gu_qx2 z&eJMZK=c)k)Btv9!Tc8?KyotmdF2N_a6Wbfyd2R1FJ>K}wdwa_!~h@UwB!r$5-ti9 zMQH%`I7tOWL29PVTe`qkUZzzHgJLN$1io$g!CKIV8wOr>#{(g5D=?+74onS|0(d=| z6QIiq<~mPivOo;M1cA?s0L1}&YECPc7%+5Z%TVJk0)lfp0>NFT0k@aK;P3~;8R-fe zxV(gcl;M2&3IGQva3-7nyEJ-zp61v|<)tWd2^5j$pg1FC$@!PSqo2CMwg*^z{u2

2E=S9~XS4@7V|Rdg4WZk{d{hQaQ+n82cB=yIDox_a?fBjivIoFmiPl=Cp?6It;RjsS-Z1(0qV5FFSB0P50s#3g%fGS=kzd0G;e`@6`*pWB^! z?c%VI#`<@#5YIQDPc%JrZ8Dvl16y!#p{sSz8p&US7IPki=2!K@NJWbopi1f1nikBjr7?GD^%@wf9 zDgpfBL`IF2&7gZJeqjRvTT|G(_<)iWoTuFIAOqwL*mqwPfZavK_pp$|ao{iN0tB)x zz9UdM1_G~iehZA}p#i961n|0mxuOXa>{J1g2$L5E@IC|rHy#?094U(jJlhz8 zRPVEap!V$n_<{-vX$#BRmS~p&7MI=vXak#Q<|T!fqOG<)(p-cONLIH1P#`$D{KXX)+%Vs~4>5Mc^>+#Daafmx$Ri`S3qY5WGbo5lB1{Kt(4;2$vS zh5=;F36?=_>Iy(1$a$@qWvv&SPSy&n0qlTF##$0>@(7hKSbd5_F#ic+ zpNpoWK&JTp!9HwQc?rM~SZ}b@AzE<<=cCT99OWMT z#Ajfrd@~re+?*-lt@9`oc6d6+ss|J=m_dbc)W@tdfFw*3OgME;&VxMh5Cf=n0pO|& z-~xFryLD9;q@Cuem*T<i`tdqWCoZ%xdvCOlb&5LIs_X)-?gn}X3BFBri??dy)t$| zX=RiGpBb279bp4Nf#MG03wsKj6oPX0*afFBOF6K<><*dXm+|sIF_{OT)(*f$41fp$ z07HrZN>>3WU4UuLgX_y$`Y8Z?L*U;4JoPy7(_xB?0z0N!;Z0-@$Z9aSGX44l7Ws)6UeCFFR(2$>p`dCD_~`S1pq4u|HPFBK%yKtWG@Gf;vTn3zrcm()sBQ^b+F0K(5O-;<7+a)D@?_oM-Bv>LA@lJw!{F=${0cqP7kWt;QNA5M;Uk55e zBLM>BIyUcMI1plvbIY?H@{_6v$#F=8kX|{ z6Jl`q4i5bm4hII^bTSgQ5yNK=LyL2trWXA+_x;HvdnBYKePS*Q72hbf&m$)LI{K;} zwpn!$x?6~8L{JdDY59+|2eiHh`1250dHiaGu)aMMJ{tJR=2^PqE>T$*0I_1o*Z$AsPc(3G|C(o2C zCx&s$B8!Wyk)v@NWkBxfNmM!gq58?vnho9lnCPf0Im$zX&0npJprGRyjT%30LiH>} z3}bXZt+6_HO!!Ea1%P)gPluKFirBBPI`0?Xgf4u97{;WQY_U2^%lk;$GxHe6+>QL! z+PHn}dl^#RtFaFn*+A6%Zb)n6kQ3OgKS6vc1*|17T^xHIs@JymtqioA^W9S9`F;l?N@9x{s@~`k3xY>Y#psrMse(u2FZyWiElg%+{91t!A(-9x=1{P+8SiRnEZ-DhVdHZ&pC1dJ>$ehoQ1 zST-j1*Ip!~dq01yCx%rRz3q+|(#ak)E z8>CjEsdkl@|5A_QT&|?8VTKf0dfKNEmupkQ{dxE_pT_?M^>6kz&9TBM
wYNzKm zmCF*VCzp<$-YyU7%zY{T=ys;$Nu6wO;bYkbKa2V$#21-*l%eO)RBw@f^H8@whsGMk zyd|(-gWFT8L4}=E>KmVkWAwq{qDjUr$lf`WM0?YQR^-j@2-S)R*QGnLObiBosH1WW@u+C zw6h)B`3KtB0qyL9cJ@F!`=Ffz(9R)fC+z-Qv--3xee%EZo8404kN4+OBIbTx`@95V zt*({HnM;Dxok|XreM)v_dc#T%a2&BH{&c{;U(K)g%h}AYchkJyEv5P7=VkNZd*;$7 ztpOzmS9L&{)dEu#mIPM>5Cp*($yYV~S$=UD6C_1pzS`=&qx#)V zw1H&nLzBy~Wulms=Y$r5NACR+GIJd$Zft@_6Y&AcO@u$@At7S|`O*}6v><6hKog+^ z<(9%9Jm&@1 z6R7hUZPU3X8|>_S-miW2Sif}0_c*j%o@=N!WQXbV6p^~u+if=HYjdnGS!;;I+7?+~ zzF^*KJknGddQfo@#^tuL6jncv-p=Yf$ZOUKA~_N*dbL=r2J>*}mEF}TBU!Ib{GPiV zg&e`o)g#9q)l6=lw2BM6U{U5{nH&U9<4y@i z^Wk2Z+Z0@L4lE1H&tjA#kFKhoa*7d=LJ29wiD-KNX~(^+Hi`K*7%7R#DMZB4TcbVk za@;s3@MD-HF6V2acYq+I0R$8f1b|Qhg!&`#9T3ETFaku}BOxU-ifW24Eu#{Q6)s6d zBS{@C`I7Sm(RXp8kX~(V?x#8uwCkV$_mcKtpd=F~Hc^&1QC=^z88KnWA0V_~ZAl3bwmYfth%-$58?QaZC#j9ZB z<-V)KpO(SK7WlYb!`T_EZf$C$l45bU^R1F3n2ue;?kse?UyUuopL7jtBjvm@LG^Pe zHlZbaYl8mjb2mh}Eu6XG>0uNv2ddWZPch(2K>2BZ9OEL5yq{x=lo1_ou8F|hj%?Xb3!TUKBAKf zd}>~TiZYlK!MZ{{${4i%+(Trp^NSvm(uY)%DApC%>zhs=a3q;Z63x2eQ4@mE_|!gv zmxEU8_v8qp`Ns1GlDW?4zw<8$e9(5dZC;I06{KP)1(9;)cAUMIra4ZV#28-Q~tzo;Z-FxHuw*9hzT;5d3)Rr-ueE! z&rD72X})W(%>A8>X#e>D`*Hg&3bU{rQ=x%FL!{bI%WNyPwMnP?ka_u#Ja>=^FK1Av zY01`-x?qsfRX17dESyt}<&%ywZLz1(y609NDb*|a%y1s|<*RhhE_a4OR_uru>lIjA z8gzoGTC831JRBYIwGUJMsN1M=?=R^YXv}h|PX4Hxxs0KnB|y8jN?qny=YK_TGY!8@ zt-sncUcw)pYN?i_J1Zk2ja$qhJIm--8}!y?Q@hM)%>73|5$}u3V1|gt*+Sk=Nx@x6 zcMvQnb`bRN%eWaa)jG>czo-p1*-FMaTKYw?n=$QUs?)&Sxcgb{^lxMT=v1<=+*OYRkvcFsf(~Y&}g^a;Ti*wQFwq>`L2=ao{QRSlq%eaMc-jZZ^MDYo`-gF(K;lYYt(-sHb^M;cYGb_U1Gibt*a!AUCc|c9_;`9@mV@kWz)xI z*5dRRoUDW|Z|&2W;Q71Yqp7T@i3LTxo%QK-quKTHV@d|bPR|*I|4yi)ov62-ks|OR zXvX%@Coi{9Nr@jDG@!vm`@yv3zs7gn5lQ5x_m7R3UP4l(r=(h-!zZLstUEdB&ZOnB zkq#R7kBuzQ5Do>6yrg{-pZ@7w1}D>&&_>bj(q4YjcG~riD=wCu)xG@rCj#7Sx~6I2 zE-_A=OOoAv4T%{LDHla}|iK0mhM>R@zUnJ?8 z`b2fcIf+J}h;EUPULnkh6?joUzln)JPLek@LE3`MdVFn^;AMQIFdr#oKwUplQjZkD zBZcy)LNYZ$-6EWA2moDfpWir%E!W0&c8$crY1PBfBUrKd6j4`uD18@cB(kglljPl-)i-A(+*Bno}edt zyj)HX>Sh?6^gVy0r+a+I*6gU`w)*sw+p1i>eghlX`nWZg?&Wu~TO5|)28{`RLEj0L z7Q>BI0s=C!NvsnU%Rw$>MUD~Kkjn6B%!P!%)7zX*w91<;6 zmB@|zReUp;Ey^9$%CZ9$8g;Leo!c?EHZN-UHd_T`QVlOBx0cC^UcZW)Cb}P%*4=T| zv|BaWtdWEq*@OjIj1qCt+vGKYkFvI}t!rScE|l0XQtSKY{xOf``P`WCYcQUsYO)0~!RlK;gN)Mz}c(d6) z-PAPA*EkzF+O+NZwo%N$`1D$!x$o@ym0BJFu0s6u7aon-1O;7JV-vO7S#!G!Z6>vd zhn^Y(J2ESqI^E(@4UL?OGEqt0W=BQ4^z7CeR|ApJDc^Cq{WIJD`mT&MN|y^A)mlp> zYcKe&lgS1(t;iK?`FQr+CFdLR)BD8!MHGhAwbWVZ$1?wBE#|E_$5E>`zN&pkN}Tjf z^+)ZH0~SwhL(QjoXC-LiWS#8X6l<{MJR5JyTqNdn9l7YA3DG&F;)wsaQ26Sm-Hx$2 zUBA?f2gO}*e>eC3i9Im-k^E9PjQKt*n0IYLL~UdA>6{{YWRf7cp;hxoeY3ry_MZ`Z z0;7bbw&N_Ha_V#kTz-+1W6aIS>3(!lO{?<`5cow^^wKVh7+FC?A!E+KLNH~G0Mpvr zp~Pgbx^ofSK!9e~j(?*7Z!avR#5)-?z4$&oSD?P!&KSI~6cn_@gS}&GP{l3I1u4ds z&vD+fyw~y}ZzMi*2)`ce2^fTN4E^z;lzh5%)EkflqeCheLK~^dV|D*U_31@u$73($hWM(F zK3x}{^UtQoM~L%FBBkB{?T<8Q#!lZU{_7$Fgki6*b~utw8sI8}07niu7a&@AB!U4! 
z?fjF&2TtBaQ&7ojh>$w@4{-EAln6w&v}ndBAq&A>ucEt{r(RcSOX75{y$~3|XnTlQ zBND(1(+6|+i$Sg=kpdkgfG`6@1|X6i2{Hj?*GDC}fU?TtBSqr~`QsyHbuIDDEnlej zhPdwIBgjBFax}8SCR|SME5&~&NX{?2F?s`PS`0(TqmeyN{1_s|an!@FSe;Pbeg}x^ z{8JwE2}L7wJc#3T@;?*6TgL}BCv{2(W;F6A-XR6ORP8z({-ayzS^0N9VGCdqIfukqU@2_N~PCpfYaq{hhxC{x3I= zaU!4NV7PsBhi{)Gp0`&QlaKq`6TuS?qb4582`I>k$p|iLH~bH&6w9oJ{V{TfFxIkP z!!zySN0ekQh+_?kpFDUWyes^zZreRg)zxz$WcIs=^F4%AmPki-v-I~b7?IC!PQg7& zqx?eoyNIt1mlxS|*~Y+x0erd2dwZwg{Fc(6=|N;+tlMA0RL@f4%SYRSJj8fDg1l-a z3NteN_S5b1tiZvwAyY1E1VSa@T1g4MjfY1uFTh`BB4G(zDPW&N43qssoY)}vu(6}U z8Gq8hHAAJO^45>8l@ZPS%`HNRqYdL( z#D2A~sB!rRjtkeVxUBE@tetj>gfkZAB)e_dTL#?Tb^I!M);r_{MawN)Xm!dTg)YqF zZ^?9DUAn`fHbc%?cf@YWjSM*lB!)<5c7x%(j9p0z+^0@6u zD+T8dl+@ro&>OdhaKy0UaKas;-Spl<4*pYGW^X@@tLa=`(Cv+dYMsKFPj!!gM`4eb z)x;Hx_ogLLMqIUGD^!olJ=R%hL-rNxg$#@L;XKqC&72{A`IGP;Kbn zu3$dX*!Vj0ie=hMW3GIZM@c8`W{^D1xUfgBRFr zn%&jOi&3)giZn6a;oAD78dB}guI-R4Cp0^I)_qD(n(#7Ogz^$(Ue{61%~$JRampTa zBr;j1vdcd&+fEz#GrloDDmuuO`?)N4q-31nz2VJu(4x==so&~P2MK32SZ^t_Y`P}4 z?2fJD$pNQPj>U9C zlrDL!S4MHn$iDbR?)B?th!=a<$pP10qs1}@X6)_mKZ0B7^b<srF^?wHFmO^3(SI>#K@R@D@b` zGxL`tlB=ZlJKs7iVYU;*MK}T}GLzu}F?XxPHz*gGVUKqLpI4+I z&Ll4k=Wg1u3d`+y&5kb@>D_IdK1ZcK0%(|5#EO+0a@%3Iic25QlH|&ttx_N6QS)+Z{1uTMgbc|%KEO4SjSfD$j2x3I z$C3(GR1@8yUn?}8;QD!XPej!_Kn=FDPTF+FYFDA>QjWg~yt!t~(^_e6*XkWYeaDLo z&dVPDc$)|Y`2OwN_19y{-9q3L(yr{U_j|)PmEidS*XE#f9e_BJRUHA{Z$y!{e z4Q&5)vOF}&V27B-!Z=Jba2Os#@R-ngOH1eFJiR8rjk5a24L_pB2NyT1Mg*NNi2VqD zoeDqN%5a1G2ky*V^5p#e&2!a^d$t2_O@y4yeG zjAR(fBfJ|dAFF5foO<#80_6;G)zz*xcPC~eZ19a!(?eu? z7D9hv?9$(>tq#9ORsQ_j2wu9 zuPXS$bqlZ6^2ng=$*r>!_2KoI@%}+uVLPSma4uDtV?IZO?_AbcyNmLN`{~0a8vfIP zuJ-D%`J*d;uon}<{W&hyOT)%(QX8Xwv72Kjj-Tr&u~R`r3rFv@{P1w3?y9NH2MqWd zKAEmrgd)DK4CDXNAG1RlL0em%on3c7IXbikA8jJv-EOysrbmC_+LpET0fT#kYP^JZ zn|XaTlh1#<(}y?x$60IAdORY3_5o#>N}kQ50_vYNM3#5Vsl&|4ny>n$CSJ7!Mj*N8 zqaUN2sUAALTpU;M<7ZrrVRjq~_Xp;5vpCgmuhz!F*rP$Sa^HNG>Egcf2CpN5zhX6&5zLAq_+QVlcR4_qHoE2Z_}c0)1z-QqHi;!eZEHDe~tDDh`tYq z_W2fl|1H`lF#0|)+6Nka4~_Qu9)15k+6T4wJ}BBJm})~zeCq>rLmIjv58Y6LZm2;w zw4fWh=q6i+&-b1g; z@v$EgO>3sluTtf$Db;`2EUOB$WZP_T(dgGG&5!felVXmt3W|d@rp5Uye^Xz9MyVH^ z%N9Dam#UjCBNu9CdC2ZrGxX{9$S_RYtrsDnq{zU*rLfIV01ifoO0G{r=P3^bBhTbPqQaVY)zP7`NdA$ zi=z~ptvcjss?6q2&3nn)O}gj0QKrZrOw-H$A=S&nd54)78RjBG!`>G2cW*KAuSM+- z+C$1{!O7!j!ToJ{{j!aOY`rh4=PSWKoU2L+)yy*q$qjWv=i6oWRQNL|s`piogu8naEZ>KqEDUxZ{H zsWbZa#Xd6X%aV_3leRpoe#*5tr+!pona7NbR?R^Vgi}0 zdy{_j;UQMy;kMr^5%hh!WCBi^D9)%@s`@uCMRa%#y>5S%oR^$f@7^pFjfF|i=V{gM z%at=R$+s`qGUjSYMKijnUUjgvn*e3>}p z{pk(yDpTU}b)X(iSnIzL^8`Jo@9F=N#thv~{a*^f#{1ty;r83R7WEyfCG=9&Kktuc z!H#E*@}bufSXM0Ca+D|&{TZr9cO%qjexm1GI)hYoRVL(|-lDMCw5n^C8L??5HqY$b z%A2j{@0q)>GKA3>;}4!$ENoUG))$<|=L7{lpXv)7n}4O)XjHHl9W$_ZvM*Zb|08F= zDE7(TWg~Y%v5cWUN{7bA{Y4!3Gl9h&%bPKn{!3g3Cv{ARu2u|P!s}SN<&8ukJ}Me_ z4&nG_(dV%_;4Q@criP945fRQM7M=q3ogW+T8Y!G%&zE~!Z$%Q&Jvj6_sw`cfN{q8!xG(Fga3jLj zQ_dN~Go?j4gKGaJY^L^IMtptIz1=mH=cSbY>}PMyjCn@YjQaaZVX;kePor3h(`--F zn`WlS(;tGmn|W%f{maWuH0%mvixqH^yx3_F*3wi2?Se)J-dUt{45?f#lAN<9c@4wl{hh0iN3$5za}>7%Ih_v!N1C#yw38* z7DwpjU-t*?lnjno1@n0}or5Yft?k}qR`-mSn^WRf+69g=*onWLgP;*&wsS3J`*ocMLT-n1triYrZE!RkDnTFmo1My>K4~0k zvGer+0w$#cfmWOCLNwbm)4NrP3k`x0gKOsaNXY8TFm^ai2uwm^U-u*F<1n%nbJ+*-=H6$9=k0m=s)D8;SE<@jb`D2c5oZxKlOA8+(wY{83mRAXS4G=x+b!6-by2kY zN92$x^7zYaaZfW>K|7Rtt_)CHT$*I8S8%`T9x%;Vh3piM(tW8!5`5+On%Vj5DssDl z4&x{fqB(Y1_IfCICUaxegK3YdSUZXK#g`ZF-J@x%X6zC=)1!|f%Bd`P>aV#6Pisz_ zPKQ+?+Y@8B7!=9ke@jG!=7aHsvwZpMF)MfVu8OiP>J)zw8i!~iDkI%8o;Iiour=sa`17KiXp@}d(%cnRZ*;_0 zZxw0z>usEuz3Job#mYSqS$(NqYpnLiSdGO*P0d8j*F>$rL`~GtlD3)nm`D_x&u0(* zcQ9_DGui4{MEN=y8PV%B32rNKrE8a1pR2tF_sCLwk;Cad#O1BT#VC`A64y9P;OFaQ 
zR9cBsey$cG|0#fTR53A5xhJN|RU2Th8~Xc}a7cyDIMD6}#!}%q~AuOg!n` ze&Uutzu4;e-MOakRL%S{aGG|gYPEg5-GDrr2|lKLA}^4< z@=hh)g2XJ0>WTc#jhCV$WZx{Ad-rv(56DM~?vj0*2a5jnfqRjn4CLP|S$g*$Umwtl z6qO+VHV+ho>jRG?MfE|;mEL`$>jP$yqF&^ztIjs|U?iZ3C$BWBq;Ha#wE;yUd8Jh){hY)M50u-EwT zZXgCIa>*+TD(S%>1}JxvSI{cy2?tFzVamxXe^k;lNX4~y`zq9G-vtS@DDyb&EH9;ORK84v-7!MM7X8b4+)qvdF zgq%l{N&n8G%2uQy_bTBaV%yLleG?rRd{i&gUMEjYJmrV-ZVK}dvb`Tu*#2gyn)Zz= zO}nl{DQUxR4ZG|O+IEkVKufO^SJHV&JjJs$BjvoRxg&vRBfmP@TZt9aP->EB+&K%FJ z@5;pGBdum7Awihcn1vz+> zcZw^t{ija`=><;|@+&k1qYoPo+fDVuxRxG7gnwNn+cG0=(XSJ<#W99iooQx7ggSgj9L>dhM{GP+vATK_ z^;MjlGwr<%xLuVFQLJbb{-eT`XFn{%lr*M>={ccq8wBMsguMB@@Qbu3_>1vB-R*y? z=fj=1mhcb}iIZKgu13VjVqBm>i3jmoJ$g5MalfbiR0y|gG5wOzny?zFQM^iS35z}N z8!MjoYD17)7qDM_*8TJ~gCPxR#dBZEipj{6ui_MzCNEBqXFFH(zCNIs&kU!;#Ojqt zX35&MeEx9lnOpEWTS*RaL&8;PaSX+L<1>Y4Q&M3$C)oY(NX4iF^|6Jr_KS(z+>b)( z;J`J|%U;srt5I8Lpn~%dH>fz8+IR%&wljbbZ!7KKGp`Z>VE=4wh8&ds+18 zu8z|1{}*?{5A6mNW;+VM{rvoGwRN-j^a9$EqW$!2fd~_MgwPWouGp<0czf?xl@U(0 zCkYv$&F+Nt-^(Qq9j)({_pbRtC(S*TM@znKJc%FZFD{ugERu>5a()FF%W0Hf2$3zU z-LM?vU1=#U-06h9JGFmUwlUUpx%+oO+{S)d@v4Xiv#&`o_oE6UL}^n+N*V`vh}BRw z%{z-Fl7;e~q9Lf2Qh8G-U+Ht_0M>hPUj0FdC~{(jXE`yjV|)nSaSMjoBcobpO1LiX$DIeHvXOp$(rq2#l7^k z!q(TeR{N({Fz{OOyBiEtF!Nf8%zyb&!RBdTKSkRhRJ}>@WQu-nwt6#m?%lM^k8SNO zg(ZIhe=(}RK}=&Z0tK7enp5e?6%xEw4)>deU;37V2Iest!GcZtCsVn}6;R$IV%{TC z-Xn6}BTC*QD&8X+-Xl7z8yh@9k9Zhi(8vT+8%9|8Xw=!()Ook^p^HA zes}$aJx;1^+N~Af=IPP+iK9H_j#k7xo@ZFklf4Msjm4vpK+xF_pB1}%R9xwEqt4Pp z0Up{+$anNIj6H#9Dc=zvOl!HJe7bI1{&%`Jw14l>=bfJ6FMlj{xat~m?_!a|-yR2_ zzmz>4UdUK&-fSw_;kstq20cpIi1dDApD0lnd>0>c`nWW`q^FwvouJW!&0N{6&p1oz z*q!(1k_RX55>Z>+CJlxS`Q);__XG*zI3-APpB@+mIvlf7QlwdKs3MEl{up+tF+)w0YQ`(cIEGKb-_iF1~+ou&Ye9mw&H1J-N= z@>NP{!N`lSnL|xcqu*CNV-5Y4DS^=vtGy`i@LaF?-Ko*`bW6ryiSw*-?Cyg~op+l( zt}MX`hD8dwANWeXd8>a2F~3<_nNghGrYDdcD(LeDA+5O##YN{A_`SG!T}REb zJ!3HjrY}=mSScf&CeNMD{+<8#<=QbJ8F;$=Eky+#S4|rO)u*jtF;x@+bm)AJ*NmPt z(kYG5?so%$9glXb6+M3QbEuxuHNrkKAmj{xQM%fvH|tksR*A{@cQ*fhhx6|?+VRz5 zHVG)p*T%`}PFxDLltG$(fEVGtCk~Fsf8TatGFcv{RUNWeS#q`d>-M#^N1$T3o=kNi zaMk=N)$qqOR{*&V#d}H)~m9x)T>;xegbzi*v#^Qemomg+JA_>0YI=UlEcKg}GW1(Jr%{++q;Tg#3u2xyi6c6xF8+ z>9yUz%pm>GLkhX57nSw6uxS(q7yWB8S1Kzks=F34MK`1-FgfM`>#lr>DIT z6(To>QP&r+Jkt_d2O$E5C>p0#;!Ku%nNVLD*^UztIj*+w87VlT;8Q4ya@s3`CsUmi ziWy+R-qR08A5q$DQbGT~NwN8g8(j<**GQltj~QCf4D#h9;^jxV#xc9nXS1x?oE1y<#hdFi{VE^o=qD`W6BW$+o zSa%zKdeh-%EmKibTFmGvrA-P1zQeTz8N=*{lH>kBgm5vsj$0M5LNz=re=~(tC}YLd zb339M8bs+3pA)%4wCmX6;1`T-?YsL_5lAbP0xlOqjy`FbqeGmWYG6a`?^Y^V_*mUFoY34C zOp#cBS04T{dHAZGH@2pMefeUTYnVPfO#cQhT0kEqG3*nAknZ5z4G2_`#fot8bl7;{ zF5}JX956AaQZ0Bt9&bd`_|XD%kvG&Y}Dm2?5zUjj?G&w)_KH@)E;%T?k);1;3%JPo{{Fq0~%(6<`D(% zG|vcc2L;i10khZi9h=U(Aq>hIir18#k5n<~?{rb+xb%*-=LpigX3V`7o7`+l)Io?f zb>2xI6E?&|qtha9F7{*q(aO~_6r+wIcQTHYz>eRB&vkfU&Z3*@V;bD7ueCiJ(#9M= zw#Lv4D51G>4%!FkQH)xB`_zkCE5y6LYn&mBs^ZvBJvK`h*#RvP0cL@$ZPR5^m~IWP zmjPV+w3RlW77XDHtVO5=k&_oEw-EG; z?G^$Rt$CWZr*8yjIG!^r7x?nF zgnjL@##WK!p}UM|&|S!DD1KU1G{3~w3yFYd%8L@N;}GnA0%ezm{xvciyQh314+)KM z#H)BoO!XUzPLo&uQI(vNoXW$^B-V&moOCua0aRGm!7FXE2icfCx##UBP`j6v-d95e zCoKJ#+9qK%*jb0E&Z%n@xoI26n%(CUZd5>;GNVI&>=eev6biPxg(RYnn5q3Ez0w4) zwT+&eW8T=jj`Dh5Egdp$kb}zFu82QzYx_bGlF(R)V(y`AZ+Q4E+G|3Ddv~gd5w7QI zFv8R3F)f4jk3`99ZJrTVA{SX9f__!sNs#C+M{FWPKzl&b5q%}{6yuH+4{rB?fxEI| z7FHwI3U<1(I!~?=<)#MtlZwUPE%@&r<0oNN-1jEkA1BdJ~;4u6zyAyig5CDoWgrv+( zWi12Q(XfI1)l-4n(kO7#V#8$D)^fr1%#BGQf?}1lxv9q3&(NFYheRV-~#Ydn6#JMw;G9Gkp2WowIiyb1UL{B=RrT=OuHe5Jr z?S_LVFuw+QI*)*U4>){KHrZq&S`rN4jWQ_(rp-bSJoNo3wxJ7X&GZ$RT?^pO;0Lsx zt01_T7Or=;WloDv5Q8Tc@4r~f+)#oiYJt`F+UjAnNglTK((`SQe5AT; 
zIjJ*CAac#LK2Zcc#1XZYsjh_K9N?zUb&x0N@DVM7kuH9_3{Nz3q#ThfO+v}1Z!*FY zSK2rk^42Z^VSy&s;WRyRE%PV@gh6qnEki08PLW?5)FB;Tz(5=fxen2M?LLFUY>*ym zMXQ$$BOrnvthzz806bA%8Dw-S4Nnx(=L5H~SHdRPWR!^E^>10lkde1`aD=xK=KZ zrG7;bCR6}e&WRyCzEi-0R0kTuhbd&t3B$=zMF#1i&;up7*VXRh{w!$^qiX(QZTgo& z4m#Ed^ovHLQKLFDrh5^y>77rTbL^?5JX6G9mcI1x<;zNkndT8u_@m_O?}yTc zbqiuRElw{%dfrNaY)bfnIcU=kD>DEF*#?`yLJs6T$pG@ED_3mya;MI9$SMJzt6(|L zEm7t=SR_0}H>`ldZhtXCHy}X&T>9-kqAMWxZ3soKL-S-?6tWd`ay$q+Y5ffPwebS6 z$W<9!B-jQ1RX5>@T+|@03lFp(o)oypZWvC*vtkKc^%St+=;4Wlxj7W!_B`;!97`kz z{x=C|R|2bTm==W9fYr8TDJ6(Kpaa5vM+|4PDJ!H$JTPKy+A$bt_EfnJCPVE$TN}(k z^Jjqc(DnfBj2LLu_Efl`XfVnqs6(?7*akbkpw7)hlJ8$Hl!CyMW#D>G3&R<_4z#3w zFm(qJpnU=(3xZW2>MmoC>J>oxgE zvF>@~+HbSjC~#nQ?aDGD9x1K?>#caGwcW=xVF@MgcWem`^gftug`Qw=SOcGtv->1) zJrBRjkfPx_DsX6}00${@a8RTNhbb;dQT+~4uEAQR($&Q;!f-vS>$H#}@pUlhTtDb7 zf1VVzeHC-T?8x7HE$0k zU$=V+xK;84HzYe4+)Ru}`6Ku^vJwWf5w{`%2ZKwHBKpA{1|BRU5Q0EIlweO359ET{ zsMdqd@;BRkHmU-_W4lgCU>6q9FEw1 zz_-otK%zjY!3_2>X`-o7U+F;pWYS`;tiIo?zMq%SJm=uPgB)Y~ZrEOeN>OXwo)i~~ zPM6WI&axIM#Kf($oLd(faGE!NxjQM6#%D5^@+s{!d+E=abYWaMZ{gSOcoyeWecoY- zyqDkI^Ri%Do?0VI!K;ejcO+0|_KUWZ4dgtK1+epl2IYOs zUkfdryo++4+Gs%pv=bBbtJg@~OEVb0FL}59j(xkL<2B&dHdsiqLdy1iih358AA-4F zYVKHMEyOc+7+C(RAb_%yPk)LU(DL`R8K@B7hn!BG)W_Wsc=V7Glj<#aPi)Vd2j=Wz zKhW(rS|1X0&0XTd-^*Rd6XXJgO43TrxyX|QB_H2Xvpo$3L?;ckhTbNN zonoZzKp&B*K7(6zXe}L)10kp57l*ai$Ce%2Ub;%?BkS1PhcQ$9w5@1ADpfCdXeVeb z`ezdPXQBb7i+`q`%O-(JT|k)nNDw$0$|k(a3SYgvK`>cHI54vdKAFcut=p`@Z%ffR zfrOFXb42{cTIif%_jv9adq7X$AD1a+jyDOyjgZntr{x3QTyY6)+0j<_l)066T^kWo zk0q=Yzp|GuDvU_ANllQo1YgQn4PWm6^J9OY4Ow~z2d2Z}nDqQm0!y=`7E zbrO%dNS}*8-L#h%dgLhcwMxnFunU{68vX+ zl~RYReLGg%-3$I)KOe{909uFswaN(Pc)}D`3hr(nel$qRhu=vr&e!>PzhsdtSDak= zGJ>Jf>urIr&Nw3M{>ZPzB4w-n?my@b*N0ZVMUD^#=LvsznFvE;xogw% z7lob~bw$URgXWUo-RiY+<{7YsyHY(*XBxAIOUxMfhc>YjIBf%WQr-`FE|!6df0p{g zn@Q(+K14jpJ?3PT**zLqM;+EmBq%Mef7JZjuK5@BV?Le_|-1RXUiCfC1y^} zBI-kgRQDk3r|=4%n644tT!TaGP*Q{gc+9HT>+4Sm`QRx$`cKn-2}NN?=H@n=FY=xe zb_b91?6wJWp)aVvD2>zC zJ((*fh0%y@X1mI@nz^DGWem21-|723wGy7Fw%#*r{Nz!>l&Lh)cVytoI44-FE>=;f zkY)Zu9)Y{M_Ixwyi)nonZ+Fws1THG<&Mx->Ucq?t)5X$QaO)>$(f&i^BOyGUgnHie z+Yeq(>-KNKm)|5^Y>Q*^Ya`endd+Bb2Yz9GyCLWwOFXws%=?w9;Q4K5?@u`OMUf{23pOB-WM{uVVPU- z>Se;!RDD778y0uIFz-xpwA{U>GCa%nA&u^_VY@9CaKB3eVW4Re0ik! 
zugP-wr>+OwxM5qmRaLHvYZXiCCv5}@t*~F z@63h&tRr|e+XRl~&WZA*I}Wa{K34TV^qt&>+i^71;O{I9OPzgR=%AR`IVyskDEt}S z&dHYXM6eXjB3peoBC|9MDK~dCm1@@#cAlXUzIQpe73-yYIn;8Ws|j3exCJuHcDpXf_Z8o5#TsD7&TSwhj+bcpHhsB8&KP^KylXq0*L3>scxX!+mvBO$rYdJr@IAAm zLLr9;XYepNe2+xU1;J{EU^YHpnb1rp?OxwjBHzOA(%Z+bU%N(f@_y9ZPh!f0=RT)r zCswzF>KprzWLYAzBrN3~d9pLjGF{{Sr*Gg!L+e$$`(F`=0{Z1YTmpG;S}Ps$7B4euB+8-d*0^CM=wJ2}mKpIsG_~f15$6)@8&aoh9?C_lf=S z_mE)&SeZv+;HsP=`&&Y5n)`vuI~eM1o^uC3seyY#q7;jH`je`x#lA;yHG`T0yeo@Q_pEp}d;!gROq{7hP-Py1ECP7(wC4+_3 zP3`1HzV|G&*I!VcuJ`K=6w|n*>$54e8(m>NeZaPOBUboRFS*qdtbvjIxLDJzm6yUB zeSt=`L2rc5rk#wUvzUz}MZS~@bDI?=a7}0!xy-<=R_fV}=HrH&gk^&Nmef}iml}VI z6mPQ^vO2=?Hn9u1`^hZ!HkqH27}gEHUw1_ae=VuOCsyqIO)3crsw=_ImX=V2dX=(+ zzd&(e-!Tz5Y%6m!^sg@QzSwTMdE8~B8+F*^jWZe9h^oS#B-#?}R>gD!JWcm(;M`GG6m(thm6xca`p{%C% zc5>P*pHQ8~ybpzK>9txNm}}PI$73yRk2LPzdRoQh_w#IT-JBIYyjqXBH&;kmX9GHX zfV5))s>RL1Tkv}|zhxm6HdjPh7Y=(x zeE9LM3d1Y%!;f(ALUs7@J9wchVBtyb`6EdJu}>=heOW|P$eO4EJ{Ymcytx6}QgMpt zVSv5jEnv~m6iWXUwL~S<%@Xv;@*e-ym*tqtHtY8!W4KwI*Y8zRG-!)dNAq-Z-bzwD z{5VazvKPzE5~V3bKN@AvDAdgld!<#tlA%d}{=Dgf_nHs+%kwVo$os_S2vxM0-6H}{DO?FZJ~gg*a87vr2jSI2`13G0SLF2I6II|WXRUC{ zjE!U7w8`eLqUd2sg?l1$Uq2eWigwHS^r7_CAJ3-?yC4uM(XP?mZBoN5P7WhT!Wv+nIpBxdwZ*t}iPxxRbauO^54bFZHgtiuFa!x!+a zxDcs10`dl7Ipr9#`_Ul4N>ofdsk$*-#RRp)tlGivSXXc1sGIMYXx{wy=7&!H-{$AJ z0n+s4o^D*5+`oDy&+D|`3$P1Lb$G{ zRagF6VAbf(cilLdCI*Z&+;{(WkZhj^ro`s)Cf8>E9@X=yv!<0X?2MVqMGfLtS{pR{ z;rlu%zsLip*Hj-qj*~O2D}He=deD@7lgi=#GQY1KBif-eG+~1;`_=3CS{f5|B`bSe z(ST0)i9uBq((klxzWx1a}jf*$2nJQ_h7x8#k2pW$(9(7d7E=`#5G4z$cyw-1f#CgSv zb+om8LteNEvTmFy%CtZm_Wtskr`^96@zL`CTHFhK|7!l-2j4D##4cwribdIIkJ1&# zO%>?~H}ULXd|lK}x~Ox-C7alAcRmMqNXP);`9DD(2;cq*NFW3`~*dec^ShvQM z{1fQct*4_CcGP|v()gTRAf3M6S_)UQ-~7RY`x3vi%MhK4kNTdKh>&qIUXQ6t= z2mY!d<@p9H1Ej9dU;i<0?C=Pmf&o5#b!TDSZuVWUMDhK3~2CvnV8s_R*ByqO1z$=lG-IRhackyq{sc{q!-9o^{OW5xp&I?QKTY>ce)y+UeC=k*sp+$( zXWx{6QdT28S>9$;J?;TZ*p1imwwjBIQWv4Wa&-66kLQ_}RL=Y~9MKzlW%rY+P6v{VfmnbriQYBp*gpP|vm)U4cAnDB1e` zz9jj}flQ=j;9Ne}4VM@$|Iw#ubhNi8t*3lt!R%rw_LfeG;KrR%cQsY4(CvMv@Vst&g9yR%d3ts;KG>3LtG8eaC7lEDQZ8B3@6ktog~(a~Z*_6j{3GFDg;10%lVMb-^m%O6~u= z-w>zooAYB>68uWP9$$iN=QF{DU$9Tyh%Bmp(Hi)M3I2+%_Hm6``nna z7XMdUSX+2WA)li7)iCyj9u3r?^wVJmYPW)0>!}I3KS6 zMz$z?b#>*P{NS3#Uiey~7}Kn*>jz2Pu6pVPIH`^ZN)b|1PQq(yp_i~OBL|{F$pzgFu9Kusj7e|!mM$V?9*qfds{b+L!A?S}&! zr(yJ2WddPO*FuhV`|Y%V>Jp+MW7q3P`0n5-&$Vj=L{5T?@OEIu``i>&{5to?wT(X$ z+09OQ&leqXj_;$N$0Qh^yk_I6khC=4ENyD(>v-0E8|LS?ExAw^sOTGmwlbV@J2YOH zw!Y^xj+8r^Jd`V$o182TP{_G6Kja4s7&%SYSZwVtCKO*lTl@9zz$xqB9PF8Y-V?*h zPe@Fuy+PKHdkg3e`U)iJ%tlPn`4x&Iq|)?4%fxCvRor}5Va-usQ%1jW~m0!EoM4dIvLo}oHL&^N-pFh89gq7?E*tmR7`eR}=t-Nrx<}jyZ zDbD@pbo{CBI|}`AzV)m`GF}u21MmIH~bTMt}74yI-eB0Y0YSFGFhS~U<%u$XZRcn$92f5>8nTotJ z57>HciOa9M9yOvjn^ygEYKOd#qjgwA)vWI)5fv-%h&)P9b$rWDx;f@(IrH6`+mX{1 zE*3fd$#1Gd3(+O?x=0SiriRCUuUg!pRgf{FQ5Ar zkF3YVL2rBPW!)%j@|yLb)vc%wn+g_f?8+KSnm$VXa5YU%%3W|ci@lz4ZHP`aConZ? z-zj$cN8osu|5D4Touc<5S#{1`ub64QY&rFRp8WO9Yr0G#&KRfoe{Mg3TcAB~Q~T%k z<*IB?masqX%FvY{hGr4m$o8=onmC?>n=knm%8mpG!azWi^s7<`Mbq74;L>ElU?SCs z8F+zk`vnjLfbi^vTBP;XhK3zE2r&N_K;E!(x4QIJ^tBt*@UQnieSCSfUW1o1B*Th! 
z?(?rGwGMts_oVxmnaCluypP*e8G*q0PhesqXYQ_2qSh^7$<-86OMzwb6+TkeMpJJ` z$BM$JEecqgK0-oLV3(ryZ$Hq^f1TEOO(sR)c8Dp@_B(r@TF*G$=r}=QP#UL z=G1V~c|Nqq5?|YBwBJ5b!xUe8n)^vMjd77 z^${c7Q>U#00vn1jB3~Xx+L&#Ug*-pW#gyL9LtPk8RWKlaGTN!u)f<{&{l;9eN6(d7 zkY}4$hYMzVU!di7SU$G>jUfeW%rfT8!e0_w-lfptl@&@87CgX?Q9Rp@(l5SDgs54e zYe7~lk)s1lrb!Wka~jyzZnpN_(8ol(4aks7ID9G(iUIPJD*V~e6|8Cz#qtqD(j0O4 z6J0*sD18k&=}l53)H;B}8d~?60~dd%2BmLY^AhSf$AGUrk;0Diuy^H&rIGFi9K&wl z8X5G_BVOJyh-ZJvaNU87aDx^jVv|3W0a@`*~Ms%)fk}ZgnC+C z!S}@_Vf0HA>g3*(S9U@VN=XPU_SArkQN~E<#9b+~ZtJHcC?M%ukelOPC}hhAI)eW; zIX1xgEefeWO+v7`8N=PZIwc=n6b?5zZ2y|Y&m6^=P%3tn zLIL&ip7rO&y*sT@-Nyx@dEA}O3sXdkTm6r*?lj4-k_Q{gUp8F5(dBIOjBGdK^R<{g zEqTiFh=$iKq+S#5v?1aQA>P+l#HWar_4WQqV(bigCsb{Blgq^F;Gvri*d>*I0{97G z=#?FYJn_7f4d=pZjr1>+3CXWeMQ=Z7jh3)vwz;Z#>><&qnD&E>;;qAw92_bSZM&VE z_LdFxPOcG^@CQj^^8*!gB68+L(c<$;pk+Y*H8kzjP!NR;#F5<^S+^^MWjb5Lc+yoL zfBI9=g;nkQEIMhO29dcMek-uic3ZtqQ7!UBGK*|?)0TQSxRRqKT=v@TTKGr$kiPSq z7~+Jg<4=Yg?OfO~2RHQ5fT#iy&osB2g!Vnv^ma-UAu^l#?n~IqBO{o6BUbc9XBjO9 zZQX_Hdee;J_o<6XYI#PtYqDb|fc>hgh#ttpyiCfy!`9ZhYT6R^@FWs*r6{PqXYwcf zVKFSp67#O>E#}O-&ZgZYC9Bg!%#WgtJ>f-?L=~|z{(&TxeT!wW@~1cr_JiDH$JB?5 zt8K>>d8kcV4HBIa#UjO!*7U2tg(h}w*xK$zJ7DN6{JJ*XUM(wL7UKOvAHsj&jd^25 zispc}ce=t15;`%u`fobUWrE}W3c?O#xKI|HO~iS3mATtiK8(N-##|R@izK=BhGHjD zWc{-E|4oG$Q+X2v21sDCe+!a81dlubPvHx$a~0d%fa`5|k>sYn%?2I*$IAQy z^aLFQI8ZDADkLfcU=X~)Kf4&Xr~oD-2H2D?3t(d82lPYlt60GN0dnSltPFrRq-A*k zY4Z=m`PmAhnahCJODTYoEV+S37eKI_{S^hY3q+DWtX~0`>KK3~%LozF^#WXvlkXxu zRO%`qVO)WB7!9JgY=PG~Gw`zbXK$s+O;28K+XBEnvEK-70IstZvj=G3 z@li#cw1Cw=6MIx#|MXcNb1S3udM$9M$ z@3u&MBE|$`V*kP1_N&ZBxbN7LShoIJ+G30 zKn9)|xCSo7@792h4uCmi3Xn8NGeFYhUCMP30Gg?a4TzVa0_=<&U?c8W0Z8FCxZeN5 z_4-K13mV`s^9=wl$5QabXmF9;bbVqHV`l@LLI9kjZV8}(Ua$d-Jr#jQ2QKCB19(QX zNgm)bS3v4+SPH-iOjfY~CME>D4^%))f5z?R zQuDwIz$trrfSh^G4e9wX46<_K0H#KIfX561O^ptoShocrjq>}TV?Pq0d1q20WkJbj z9Z5$d%lZEgn4uJs3yF;aql{vh+Q#hx?h*o!kS5x09Q&l7Bs|ei=Le8NZ^09fz5)V7 zi5ZNNAqTX?ToR<DMecAH0LVNBNYw+@GGb^6Ku6vf18gGm1z=-V0Uq)u6hRp_4?5-u zXNL6L{6~7lH-k7B;RFL60$Sgj+e&G=T43Q8OM;kkkh1PSoaq|+3d-3WfFNZ|z`V%| zPaGNs??Da2NmSkenBi&#a0=7_8R5wQ>r!7A+zO?1l11z*)H!fz+EkKycgC9W0#`=-j~o z058A6`j*uIq)_@9Il6%r%-tiB2i=ebEQ#QxS-&g`h#6^+Pfl&O7pH$d50?b*Fo}{&p5LJlLx5nNenPu+yZ3GF97{; zD}Z8r-UoQgG?+r}chJTR7TvWPa8i)G0xSzk34g=uLI~M+@xrw2XuR)0# z>Hsj41E#T<0V>Ym4cbS+2|@WC(DcFb-_ijT&pBBBMaHe9BgKVpn9(!jfJjl|fE3Bj z08%CE7?^7wKJIeX{m06j0S;y6eut0i!2>{;L<6`aM~MZ1Fm`~uS@Q+(ijOjIE58BA zla0%uar5jpc0QrQX9E%oNRk#+Acg1vKxOj>3l`S}ge>rB;1`(=4)?r3P(8qHtZso% z1CKRQfY2xbCT9BpfGEd;V9Z!HFy=BVNKct6*Fa<(gcnVK$dp`g81>#n+A*@NJ^Fbqz!+1E$v% z`7d`b?tj$GchZnPa1QZPQO9IA83IVN$(ITo9)2fQzO4w?<1z#c4Sf(OQpOONeuffB zu0bpWbf!85NFzIqRh+%@SFKg2g!j<*P(mNiyM9J@H0F4-A~%}z=_h@axF-T(s$J9g zz6KnDG&l%q_3M2D*y@T+ z*o4Qu29K}9|zya3rw!KRM&18buU2pq_dkZogGY>kkku?X>v+G6-l33(}=gL z$7$x{o=VQYK?e0R-P;sm$^W#$6MZQ6AYA2Lc;{HflvzdmFHC&i9E1BR8NyhATvs=-sF z)36{8y$7xJxSzVib>|fU{)Z4okn zU()?hOqJm7a>=V~T#cppX&kLrfspftXB>=A`mw&f^ytQ?59?M(vwz;|VyvTTU`kGAZ9O@rw)uXWipC? zEaBX!?XovQ+a~FD(u4WRu}S_v89XPYc~krl%8Mp$^v(6Dqf13DKQ4qrl{y!HRc6hPRNLBta6hnJvz)P;}_$ZpH-)A3xkmeti-&)uvN48RSHZd zYQH2#(e|EXctQ!MkdY#+B&fjfLbI6qEllP{VTq>p1S4CtiPlSdYcZ{2*SD~v8->E! 
z+7oGMCBi~RCbufT6&QNtw`_ch>ME5`xZ6UV3X^%DeY9)yZs_msLQ9L(E{^aXZ(~2V zN;L~^UgXq!fgDGkHy?#7Nu7ib{W>Q!(Bv0DN1(S{Q1e4WB5$-L5ry)#tM#jx=@_S4 z7j)K8wQ5;U+nvvPQKqIh@|7<%sx?`G)%&Jc5tZ?}_3j9EFEinbs1fU;d@uRu zDIsIN0T1+exokAfkvQeL!AX}ubJsc?F}vr~eYeL#YDaM~Y!gI+3A9Hp@Ib!JLt$iZfr6)p z!Ks-OI$6Agbt&%HJ19#3Rx+kzHg=LS<0Cb5BP82hp*lHxW!Qa2wB2Y}R@zJ!@kh{E zqS|aYFeKK!)w{;>bd^R@d0c`yyB}R{Mda4ug*IoX`d~se<$@_v)1#Jh=Av# zcH2+ZbQ)IdJ!SqZg(mh^6x)5vT4tYVb&#BK7%`j~n7x1Oxu|nIgEc*t^16OLr6itV z$<0R@Vl9x7n7VSOMx!rs1ku~UXx_N>W8iu3f~mZyZ7Qd7*>_HFm$#O~Z~7jGD)g@; z*`$ix{n;nURo{Q*-5@#UJd?FBH#LVu+Q$IjuwF(J-SKu# zofm0W-BI$5pI?J9nwi{}IN5X%Ie{u`aicPIxN8NQA+q8b5PWnUMAH5P#N}-iZhYZ% zKMmHsQruKc zAW`haFJUFAVO=52Tlw{yy0yFAOe_e17pSzIFRVMSGhv1J`vJk$AzD=5=wJD1VVz z(YzDDUpVqe-ZG3CH#Zca7wX@%$ZuG!y#qZcEVxfH96HQTULM%f6gg)(uCnu!4fU10>-YHA?b0-AGcUDfkLQ(hUy z>vC|A7AET;vn}-JO@N)cR}xicloHvS=gd38O6T=gwO%o3=^X180ir+A6E#0rNbB3=erca& zlFpl89H0I}oFvPfu-V-Zd9>fHfZapbx6&HD84FcC7uu@uewUGZ=gy_=kKdsCgkz$v z&=9TmqJf^dfE9<)l3NnerOlr3)x)^J{wa{&A~=TN!77n}${|Ne>7-Ojq^(E_y^ncE zwgB)KgPp$LE?k3yv53I0B|@WlP?EmVPPkw=I8UMed$)prS%Si~1D?WIx_N>@YPrIR z_6nrs_@@SY$4?Cqg0!GJ?TfT#U{>5`9yQ>VbN$rEL1mzTZS2%&Hh{Iwis`33IwY}( z0pQRQts`KcmhJveNxh}SnrWr16(CH)qU+mnt>Y`}r2U}8TyzxxDZRp}%)6ovP?T?3 zRVUqt%ad|{4duKmui%{V`N>)CSIzk}scvbJ^U_j_Jde|N;F+a88@Z)Xp7!uveeAH* zG|q5-DkUU~#1Ig_Hlb0d^IENN>`RUkXi+7F)V6SxjL-1z8dKTmxnR{O<+5~&SGR_~ zntyX`weIiQ{rv#TpYX4iKtD{&h02rR$hK=pg7O;k5cwzOvbuX_yTf#ye{4tD<&X9S z_Qh=y3tw7uYY*B-Gv(aB=r=Z>XR68j#TLp3-G6-mO0po*W)t z(nFU&=&HVx$K0#+yF!b*}Q1nxW)6oHdk6ztFao?B&)iVg*YeHi=rPDr-Fh* zYVoKDgl6oM7be$fLojhEz}!Ol$RDvS)#?E~bTTmqD|YrVrz`zYS!zCJkG2kWN)II_ zBcX-yMM@dG0sW);4-0O)<7u5=H*)(F6WI(q%q%ZTXYKw~8g_o$DjYLxa9rx3v)rp( z@w+))0~Y%01yKRk2*#k%Xdz}gg%q( zws2SK;aPxnE=71P&ae!CIfv!f6tN{OgumGR{`Uo=0@`pLASQb`%$07K7_gU<9_IRC zm;(@zM-f^}JzN54%vp}PcsZ;Hpw9OHNjDTz3s4Y+yakly*vDu@xN;9?05sKC|D+5> zPLWx-ByF9@Bwifl8{juiy6pla@x zEwi(*HuO~i{qPHbv6^q*YiZJuT7ltk0HJz=^a8FW2ojy>fFEQ#OspKpvtOZe=e*blW;`Ou5# z4-hYsoVFgUoeUWO>@c<+Mp+?_?+dV0tKtU|29&ZFLAGeS&!L zqP$R&ia%GMV5ZvYedq-q0OBQ)(?<7dCqo?o155Z65e*9#s;%;eUEnc6yfkv!@=%gC z09K8Xh=&C$)mD8_5(~u34CRHhlc5iQO`;@{VZjEq)n=5$2Jv!5d7&g@0PGAUy&D$n zK*d5ypCMk~QC_ciGQI&|ZzO$+-VY1*qhf_$;Bi8{;!s{F$r1qjjFL!)1;!*Ky6hWB?&;h=22cK$qN8$LrGM_g6C?h zu_#Fh;%~gWfv&1oV)$PUD#PtS{`E`wMv;BqndX~A;6og zV0iluaQRwHD;Xjqe$-?YDV$r!kpBa z!Hnw9g-uLmmS!0BzK6w;OlpcJPbm%I?31D;x@UjHxbi3X!$Z%NofK)NimzZRRz=YZ z0V+6(p;jr9RZcx7A0hiet5aV~`RIkRZ{K|dN+|e?Wtk2%wn3f+cZkp-kcQUhzNlqN zc!#JK-osxKMR7H=mk&OIp6`oZVrwaMnJ_>A!KulwLsE4H_KNbcxsR4wC+&%yI{?8i z>b;FfQt95iB*rnSjU4@mfoKKqQU#SrL5zZT-TJTdKjLsBX^-8Nhe&L?d4GT)?!k-t ztDz(+T^i3ILBRs4*$DLy7^N(|a4-SqWbR*^B1m#@i;%=)_DkFu5AMPi&0LJb)2IR= zz&EsOp^zhmpX9}=e7$@M>tb*mGbTQI*X-v@y*>a3eYxNHlds>>J=5JlNDookEO_^F zDO`w2u6>oqQn(SLD~z0=ynk~NP9QpYk=MHy%}W!gPJm^=fqiSPfFaKOPF%1s{|ywi zMwzrlf13pme_7Fi>k>+U+Jr;)dZ#e-EtHLLufRm;kdi(o8+UbGL`?pp%^ximABmxl zHt{qp*kbbZHe4Yu95(0=4b2F=L*tYtd5vo<*yz);Gk7-ry)=J;*4kT&tAt5@JS<&S zG_kekxlrdy5kX;e9GkVK)lJcG**sx&vsVI>kA&`%L2oC!t)+$EAO5~m8e-Z%lic)= zVhQzD{mHz)Pl=Xo?naBJL4YF=$ht{IUzZ(kYj#CNU!NVn_UlNn{=+62eN*;y)^|d{ zU-S$fymWDF_I(P`t9r(B^bD`(EF9RgB4; z))Dc^>CK6y2d^_Z{cyHj($6iETT;n$Mj*z@m(~yN({4)eqL*auM<4&LauEVkenLm3 z46Ht|a>XX5$c-rFTYj2Cu;IG}s;~Gf_$9KO96YXZcoJ`cn7rg{;Am@Gx4R(k#4F_W z9W{cQa{y`a^^nhck@|Ops-KCoeDHY^GT;&^LS~akVmLh2+&!0JhPVyE`)LCpt+L$d7_H& z7wu*()7Rk|N^c?TgvW=NnzLBkrE#q(1qxqA4PSxem$EMQ?gQaQ2{9NybgIZAa}C-` zU48Gzs=b+zVZT0E`Tp*Fx=l~HdwZX9Md??}DdrPKQ=ssChd2bng!v{>_l-vB$9yey zxsqD2Mwococ0xFW36D{mDxJb4SgwR1Rzvzj!SKr+;#azW$UVb`gu1(-m`jc$Y%Rg~X&X`h`QVRr)D1{j<0TU0*JGe0g5SLw0CFEO5SqC3cr+OO5;GvP)tT(Y 
z2EQ&r@&cF8(%`y|_)mtKNUo=2>+uI9{s40Flk$?45prUBX>abR(>v*EB>lexKvn@< z{8Gu!(UXHV=%T3ad#{u zTPH~P6Z_ie4bp3)6Aax;baOTQRF&p8%#`K!eSFa>m!oq*N`)JXjJ1;PA%k*Z-=&l^_po+pVODFzH4sri z9?LnN8N;Jh_7VvFlrbA2J???!)TU0YZjmi%aiRK>Vm#u} z#sJ=6`#RF8`T)Ni`7QPavqJd9hpI-@1Ei|aQvSk3RUg}%`0;#-p5#nGoqqi~Tc@jR zo;r!GIcQEVBWO;*_jwfu%7Rhmh_bNj7bdJI(^e}bd12zTkKUF!h2y=F_wKAkg?;_H zB_wDr9ysayV(fQm)F1&aPb7=FUC1E8Pjx%3D5=EaG!&*6_`#x1i5W{D>GDQDm(4ep zzBKopey#xuT>SmTq7G~yB-kMwE=wOI@YzCY&Jyb9ww-CA$I@@kzR~|+Li`_iivsl0 zi8ghp`A$MLuB8%x=&Ea!7UpYd%a^pg=Ll-BB2-nyuU0fRHG-_jyUoxg{gD_ta=Yi^U$5; zyphXXGo>eL!OAs*q>oumDatM%3e0iAOI<@nPm zHx4XStLw}H@wRuf-s%o(;r#8H_|(6kF?vMehdG%4>qvt(<}KaDf$1mM5@Kty{sfXd z$(y4sQxB?w%^`>Ek)WiTtd3#Vjtx_p5?(`7P0CY|iDQc>hDEpF$5FOG#!}yA$HzAV zzcSqQq*M4lCX|u}Rj05yjG>zSOdxOx^|0}>Nr(oJGr#eA{(Rxd+mb|vq2-6GVE&v9 zybJ#D<}5_|**d0@0%p_`ny9mefcNBn%)_(YC)lzA!nlOq;b+D!wlYDW-m5eAa^%5L zS~GHFtrFRpAJiGRvMe3$KTf|@?qE=`&1aXAid-pwllI89^~y*nMdR!TfpzPwrlCzr z-akiQM~98KwiN*rNd^%*jrGwU6Sh0aaD#$TnTq%$Gl#cJC5yQvJ@w8#j1-@QvTYfL(Tv2m8^S2*d82>v* zegyh0cIEd*lq`@ga)v`D+^&8{2`x%pLEB*_esR&JzKZujkJKS7?cZWDvFD=G9P(-I z0ZGT>E+M<$r}4uyNm|L2LjCFge$sx`GB!k!qH{oNG&}6KN5-k9wO}?i#Fw7#=8Fv46YwFEX%`OhNk@JkSL94bVsX4IBYC z`}ewvAa|`(kNE+vS)aQmYJ>tuQK*qgzymy4D&i0|AFifUrznd)Dh#P~XJ@AR)uTD; z3~GBKcp!e>{;lA9)M&2G)ng5^FtRjp|JHgM6^R&9N(#~C`3oCBq%oDc?%#I)5BTP; z#lZQrwa}$IsUMw6iF9MQ_%$)Mrt%9(R`HGBG{C=Rum%+Nh8BzlqED zZy#UnXzN}*R-i7_sfZYkFs(%1^HmP-UfrUq?hr+%NDl1(%0lHLk*?~wo z>a^U3uW_Tw&{$A~#R`ZvJ~O0VtF+JJ@(s)X&uW!GW$w)g*@~_zpBEh2>wBSJG`@3O z8GrSDiM;%EqJyJb`HIGpe}+#4r4b@2n)sy|exQI*6aV={XqqNu z9`sv;+)-tG9!in*mam{Z`To%fUcS4G*hd$iUIayi_OlXjmBalW0w$0tC!%yq$i_!e zeN-Ji)#Bck(4z8zMbUD66$>YeSwfO8M&)mRBJPnL*L$^gYp66t9OB--jG+j8o(dIqGR*(v`9) z@)pxcC8-$!n>a!y4ug-7OQ===mb)NMP6TpyN_DuQRS-g1aLCNqW5O)RuBztQ+REtn zaEf{ptWEK8k|-mlfU*YO>7^Rp-dY8Pe0dSY%98?rWFW36b+7>@tTkRZo z#)A)r+rLK-!*vuBFqec9WclY9V|WUP^IFO;{KZ2rd=K$w zT?>ojwcJ|>j}q791Dx~YODYx_6^~9;^c#gehOHK6hK$^XhRi&Oizp6_Rrt#=zv#2+ z9a~)gTV?dRdS8SzhbgR^w{#%e_{2YU&r7p(lgmq4)JD=P5xkB4dMwEP<749c*!d*j zi6Uu!ed0XvY)x4~Mj}&z08vFnW<69IU|@&5^><2CZ`rRRI}zQDzwhAsxp1oAvA66u z!P+baR)B*)Bw)7#5;FeEFctZKNVvsMT<&K}V4`4W)GifKDHZyt=_>k$L7NrXtW{x( zCcoG&Q))7*d;w-q$RrPh*cSWAuR7Rkj3r#tXJxG>_~#kZuRJlvt=-<{2Tf9GtY5Jg z&=lloElqp~`&~gZ%YPHDrRrx|Dw-cd)PfA1srHBD!&;4BEuGLy6J@aT#AOr=;?flW z&Rb^kmH8Nb``&3xXI*L7$5&|7-BW2?!qyUB=SUYMP7laej0=Mo???+8s!U@SDUUvQ zOj0EX-~m5)EOyEsYIH=;)WfVwmQKmyjNHSB7+MvDd0D`|B_nd}x+F<)xyJdB8ZK1p zS||CCh!**t<>i76xQ-m1$XS96@e)f^bQxXLLX)iTSkBKY-=(~%dF_(|7tySwPuwTW z%0|w*sqO*4xeLC11D5(UaGyv>1AV&$fcY4v5EXdLeRtDz;e3XAsX9~|zRAkd2K4?w@RqASEd zuY!!|`T}E0FZtFv-b4{^cePqxJ~?Vu5w-yTvUn)pQ~H&>FFO!)^6{Wfb7(3ycrM?( zfK8sEAtSznZCPGm;h@RgHbYzh?w#e{Af0@r+|PTk>;O9O@3lSnla`SGOLwYZgjQuL zVd&GrlICr`9S>nXOeQ0t1DV_Gz5@CzX?RGNJ z&|mM!ms#$Jq(jP(w*5!L=FYdp;qIjR(|7~hvh?N3id|MZBHHl6e56UQmP9AG0A|@4xmKKnWYj9kKJ3cxY9-8#`Uy880 z;qBDK;zRe);^faHD6Wd<)S=xx^cx+yU61HmfrHk9DLK$zcj?Yt)geCa7xeFW}8%V-f8eb+x_roKiRYAD>FSMHmF)xG{9PJ>*1N)e?K*T$)l|9E#_H8NT*HN zG$jhZ79uF4G`(Pu4B{?MvIov8JSjUiJz=!=P3*Y11Dio=hI z)AtFC#*xy4o8sN|l7}-Dx}s2V$pAfXsbHDd!+!NY)D0~1htrLwYa;SLvEhGPUalx$ z6nH8eO?FUTGXrO*KKu6$s@LQVa7XdBe`*tHIRA2eCl`TDh@%t<_`>f$JR|XKdNr9u z$L0`n_I;XREcr_}t!vvN%Q%q<*I25sK>Nav4X2J--@$c$H$hyoFWIvGW}Tf=_1p0i zH>ctQamIpPZBBx&?w5s8rc{D~W-Gd;S)+&gwfj$Dzmnp7>G72^mMWE*B`-&G0oW6^ zcEJzdqJA|YM=NoA>|1wn+ZJR(jS2C%na+IEmx_CiXigh90#l3?@wu_j6sqkWyU7#x zolSRA!s%@&Hhu1$9P~J+DjkLdo7S#cS(PTcx<>w1hG9F^PZ8h3j~8ps{;|L|1Cxv| z7f<>vc|lGS3p+=1hq0DXUD zfr7*G<2LV?A=}|UJonYccJX}2WGQF=-ky6ZOL^19U!YzfA&KRloR2Obkw%Vi)o;tXy#dw6I8lyGs@-s{ zRx9#J2Wp%8iG$^EiZ*^(Rwz~}`cWCWV)^W1WfZ2ABTR-h`#^eIJ#BE{iEH3pm)PQoYrOu; 
ze?8299pvx1R393wdw$?FCsb(U7?P4|?c6y~>7wG@Ft&@D5(fACi|B-4z0l{xvcRSV z(}#&AzJH3?`_G4vTs-z%0#gr;wXA{=1) zL9xV-X!^M*!U?7y6-(qs)88D`eF@!wf6#k@Ya!1Krk@f^R6^62KoK4={XelpBQ*W* zD8dV-UldEUN7Mg!Qmr6>pEJZKc>gIRzm1x37Q6xV}Uexekr!8jip#rel&__!7M{4g?cb9 zfJX5y7}Y!0}$$cWpm3BFCVc~)YypuDZSqnL%kwFx`CCwTj6V&_^-*&^Bi z{CCHy7T}qR(KErK@6VkTB-6KL!_d~DIMsekG|#Mm@A+Db1QSzhDLosf!VndcMqf81 zp2SR##OZ3*5iz;^b$M<$86UIvXY33|%lR3bBYvk?80LB$CW;09hNaqLsB6fOJH zB!XIM5!{W_U%?CiymVfX8YR#OesO1lTbY&kHrXxP4BeS<^o=ymdSc99>ZHhbT_@*y zP$lWt>ngHWa|LD?&bJ0vbOpH>lZzD0ZpBT{pYBK=;x%H??&Jj;tKjt<2eRn+>#@Q6 zNAh9#>pvZyV4ZBENaT>p)4fW?)&%H*<4KTMDrn6jo0@}ICTNfD^PB`a!FnI_4}2}a zbCZu(C8T0M9jUns7*I~wdeKeE!B$eOMimy>%AoReL`Z>ATx(vN~S;0C{<|L)HyKp zY>1vHh%JLuPWK~}`$)4#sOH}h1B3cD&)YR>!d00BH~sQ*ajRzx_Ma0w1zDy^Iu-vw zb8jc^)l~?~12Gf32NAQ=QK@~y+!w({WW5sVvR$>Ikqo2CCMgMzd?g~LN{3uFp}w3r zF(wH7^-4@Et73mdIgBPdx=JYb`f4?*qTTlIj9G-%&CVJwGy$kbKVc z{<$QB1gzNk<;*1l)u8@GI0!GGNP^(=6qaB1cYkI@E))=WF?|qF*U@ zc^*`AF@1S1_%A$DV*`8so(QyBx%%YM67_vWo%YrHz43vn0kL^>;)0;_g2{p8NX@R$ z6TLDeX{N7PBTJgA_vt?!qfS!?aTNWJL>sdwXi;Vt2DUXC{G!e%qVXYkxw>&d_U3e&gCbK`YWP?n7{ zI`{e#W>>@o4)8t<`XpfxNXyy@PBfJpVb9-rH)WCnLQWw%E4DE6+j}v(Kuw5g_OrPo z(39=nEz(c>AE?E`STH(E*td9Rh&*L!z=6s)Cro$u`m#=m&xAQWg_L{d$~$a&;&g_;q!=pbqTV97l9B6SQ`}d2q(hRpX*CnKDAv{-)*{I&yW1HsG_`7pk4YL za9!sYvFylRT8>~}pd_efZ8`YmxIO6ejZ?|jrK9Tc3wTxPrsEWO5cTD+IlEt4Ul}Db z4gbARw$tL-_FdBS%hQ+}5a#}ita{`V!pR4SyixAC(x2N&T4pa1I9ex<=jau z8x~-jhO{FSN|x&ht>&i3OD`e>(pEg_4VcQ0Y)YD{OA4l-zKO$hb_UCj+EA?njTsG| z-FJ%hz9}R_k8RRNo--ONJ~4eg8-?Rr(Ycpx?kX@I_uSQ^c_Zzp_8zWwKK8j;BShgi zL%{dSe@(OkYco_}ynZp180cZ{8$#PU@OxQ;DR z0Ke?;@U-PyiPxgIE~uc6&3%B_IOKfJ19uNLcd*qt85Y%UfgQ7){cFE@VoFZX@EvK* z;#8AZDqHCyk>X5LN}`R2sN&Uzcw*tZG()q0jrF}$WQ9(=!cz~T4yhh2qe4oG5C@+| zqel^^Cy~e4<&KtPGGq4=L&3+isj;6DYHm00uYgXgxs;uaYtqb5eSgE^E$K!ZL$d}# zsiU}Qqj=$YZ!v4t8S-Np^8Xfr@}Gz1qle~Whvwsk=D!WiC;qCh#MM=s>=|Uqf%|;K z%-|10(1hWg)POn-@HH(E7YxLs1>(~J-<-W4PYd~!L(mKT3FeLjLnFc5zrau&nNjZF zVCZi!cN7>J1?G+hL!-gm_=C_GF!!55C>YEg3x>vmx#PglI52lS7#a`eCJOljC0b1Z zGSLE=X@M-XKvr5H8!eEX7Wf$q)qf@grW8bUF(Jld``V{l? 
zXpM`5?b`C>`C|`)J#gX*RARg&DvH?q_Qalk9ZyNS-!fmS<8-o8Yu`};jY#THYOYMI zjW#Z~giXvOv|9aZ&0Mr&1QkP99dxyu|Fp!p_J!fO7L<7H4AWH(o(6QrmAS97>qhyQ zR_ZuKvG}lq>m8@J56AN?@FQj;2Qqvj_X?U*OeC*UOa=@LY>#nA&sLYUOPbr7H{(~; zN_bR??h19(-P;uafyU&62*yfRvEo*W`HLJ1Eppb;btFGEUV*-W3N;GMA5t3!=WXd!lUGQ35V6ZvWLIAd zl%yNFGXh$FcXEdkEd$^CfU}&o#8(w3Li}}c@Gp$Zg$5^P&S{+bVZa|gVD^eV@s85b z5P$g;Odm`dT*2=`aKOz2KF;Ei|Q*Ea)3k~vVd4uN|g$8xiy}|4`XEbYKyW*Ky0#Zz2G!rwC zm_Bdi4$&=`!q>-VUJq*Fif5LP<6o4~#xK$ia?Ngu2iE)vc~S_&yq_bY+Gapa!ye}Y zhUJ~nIH8U)+TLIo?In$=;)ZxfEb5|vkA?_k(K96Mh@<|q#5KL-sapoAeNvp!R=>Mk0d+I<3c%=Myj_IO)7o?jQb>#y^jH`Xcs#PRj)bmakCW zL`FV!#-lH^b;s}w*JeAx-<3swabBcH|!R8fkrbFo1cIZ z;})q$LM+84tw+Vpt0iTr56q6X5?=m=z|V*ug&R*Eso{<6q}S)n$g)#fQj;q-pqV@k_){Ft0OyHU8`^S7|@9lhqLuao9+&gVpWVh$cv5?uCUsSFC7 zrL}Zptx}GHgR-%d4}f$*kFQ8M$B4UolHr6xOSQ^gSc>1yoO2arkD2qpvR&)^=RnlE zxGGGQ|8xvC<#BfPqv}q}pGDv~PkmP3$p7}jiDcp`I6jj91m8B18&<-;R7ND zT)$<0;QmPL!8-=r75j8f3U#w+86OI<*xc@3>K#IZSu3=t z-kJUdF+AXl1PT;A-KpM#V%pM?xA?w=$avGQh$oOqKXQ+9Y5Qv{GQp&4BWL?Z>gOHc zQ>(RTNnK!V7Pyk~{v+JqovM$pc5UBBB&~*tS$eX{2pLj8#KZvDXhQhYC8yKhnnU)2r9$sO8 zuXZsse-D#@kiR}F6B9Dz0cR7Hx>_>{1h>UG%UX&u< zz1hD#zK^-T1o@C6jcWt5Kj_a@Dyn3y#2O+WkzNSNON7@Ie|rWfz$7!|K{X?3zziPR z-&}xia+>UjtppWF4($tPu%&m zMeE%M>+}b)OwK`|{@ctU7sdQff9e2pZ);usU-}VtbV%a@aqVOe|r%TuD9$NRi~|&WL4Hw z-B>XNy=fSxOPgPQoUPG5(CE?O`8TFh(eX?9BG7)iWu;o%OT_vTux@|$o~v(@xK3lw z!@kZhs@;uyHLW5%b%gJ||5Os0ZB&}WnKGsIYRgDH*&y7yBQwy#r0_Z z>gq)OTYg8S0^h$ot^6T9s_v8Bsk)*0E4R3n%ujX%6dIVV602hBRneN@GUP@scGZ6 z+RALdqz5~BR=V9_EF&KNtnnFAKKO3i2yHN54r^8^K1Ghf`t1M#cVRBXL&PjnB492&WE>csm9I9l|G zI%<<$%XH;{+$3AGYMxeCmT&Dn)Z)KXY=&v+%uS28TtxI6u6T}VFqM;+X&EJ#YNe7? zX;CexxWL{OXdMvbYDHaZl+LnO$?;?mS1i#Lv|M`h_cWS`=kJowKj-0y_x z9>K?t$FlpG@Fg$@PyKZT#Mi5}lh0zMsGW+wtIi@UOaGb#)_}jenxq z_?ZsGmV+1#z5xu>0R|iY-@U<|Q3E6ej0GCvfl{u$S*|X;AeVel&7EGvFvI68kP8xL7uk=7HSQBfoV`xf%@AMy*^NxT^RPfN}_c85c{wKtUKf9!&c zyyeJZi~-&n9FO`7?UJWMhc^=wZLeBK*G_IZk43G-Iy+oSTzo8%!GVDji=aR_JNwC6 zpo{X2N9=@(YBdGZ=W3%HQgGXe3^PPPfMIHl`$&u>&n;K22|LAUN$2TVuc$Z2V& zTwilc@`1b@`|mo6g`Bt$3(Hj$IGCCgQS6BhoDo)~>&@CLS3S8-D*hii( zO3Z_}>w>qF8eiVwABzqm7w(bkPuCzHTjWie?3|Tx5{GKLpjWV>zEAf#zP;j>AJd3T zo1@<9^2aKhriBKLqnz@K_vVIM2aHwx$oYmFgoWh_m1qxwgKrz&>2D!8c~$we@GX90 zz5DtiXM~zG`t_3u|6*hH#z=w3w)xxK4R8CmJE3z}OS-Fk2~{{=^%+EagA7$X(2SO5 z1CapeuZ>ZTG=I^7Qk8=g_)-1v07+Hntd)XCpX$>dKY}vVMODc zm-F=kRfr)_rW{69Sn{g1aund-Qn?AyAKcC5XK77`yp~R!i~UuZ4g~OyEB{k^Zvlw? zJZDg=UtOv-<7{_eYprZ4)S@?1D$1R}Jkxb3z`wHY`)S4C+gegXB#r_(ld@liWmAZr z=h83B1_ynerDRKJy*}?z$&`+>wzijx8L2&ZXv>kG&X#O|>b>1De09J5{&&sp#YB|I z0rh*kD8GsQwA61i*Qvm(qZa0AQA-sovHjf9E}b6E5T;bl5V)WYd)Eea ztBi<@-yUAN6Xjkl9W{tyEc5+wg?tbG;@qiFXvDqw{u(w zjBM8K^maR1gpWFy<&8Rw@bWd59auWv#*Z)OxSK0SR4F72u3pc$FZvEF-#5hA{L-D1L*xP@D(IB!dZR0*kDR+pDte(~qzE~GnqMDw4! zMDhcXpWU+R97Y3ly?Aez_|&FB8pZrRmUd%7y~9>pp)_M%iPt=g?IX4r_u5nWGOKg> z#JWI?$3$NI@wRMIgDZwhwI!3WYI26L9#&=^Iy@M6O!{q&&&)&PSY8AxUrdTfKg%o7 zPNJVpBMk}Q&f3Y0b^y6ksL@P+VN1aCr)+(E+CxApxnPGrGf?POAQc@dr_f8MFSA;x zZP((*%ls~M?Xz=HIrT#$+fj>9O!H&sr;p|gA44!X^Ds@V!&T+_Rps!4!0FFu`ClA! 
zT8}@YXV`rcG|KoU&Yb4so|qioi^24lwEVAn{dZX=_kQueMgeZDAs3n(u03%%yijmD z7A-%4gzKl!Z8fZ+5|rC>&OHe^yl`;(Yg&G?bi%Q8Tv~n#30Hy8ZEvg%|9xsVvAe<) zLW#H@inj#RyT7|KQpHmmQ#Ehg9f%%zUzj4zE-L4KA1?F&$P`tAZ z81;D@her32K}Ih6H#nUXP8SibLG5M>xc0F)Ikg_gr5D*~zV&}#oFc_- zhrT_>zZuW1wk>~dXmAfvYJCxB5;`55*G3aWQ<1z_T4q7og?$ ze2LHtlp)~vmTloz%O3YqLZF=Z!*!KlhfdQ2_ev{^iGw)9GkuW0j1NDQ``)Gv3&FildGI=r zCwEs`3wm$T=S}bKrq}-2lc+HlCyw)FTXBg}R$lxXL`os%EV|ZB*w1+oG{{SwDPc|I z?7;;pZOC+;Wq?ZLee%Btv?pJBk*ePM*?--HQ!OjEt%ca209%T8XGFlKRoJ)spSON@ zQhkrY?g2MjQA*R)%oT|uMUz@_sb}Ijxxz$en-`FVRue5)ymY-a5S|7u5%?leW z?tBVRw0ZdR8)J(^kE}3yeDape65Ec>I+X9w@h8^n37d~$`X87*y+9m|BN89*_8vT_ z1+`*}NsS4w3LcqFY0hOH2)dW%-(e%J(^!eflkbS^Jo8?95$=6kh$4{szXO~2xC5jb z-_REBVLj;4LAR zGVvGWV&grvWGMP_!Yl+kvk!wv=oc=NHwn4k84GXIy1so-cilWqk4sH3#)NIJA@@{J zh_$@`tJ?znHqCgX#I3jqQk9$_36iRdFx{g*J4UQ!KizlgIwD|NMsG64G%nv(=~$dh zJFV=7WnuZ;!h}-@ThFiu_X3e%xNPtBQA%QJs_Y{&vk@5&7r#MFBnk1SbRxON8}%2n zGhM0@k@%yx$f|Utsp})FfGT_P+}PE!r7T<;iELHf?Pa@o2Zp=dgNzhn^4pG3n|K!x z6VC(h;aL|`h?5BrQ>KUU^2PR;@lvOd(3R#T+BCe@$tv-<^I-4YDLP##`rq!;LJizi z#i~COs+_4>o)zma1AP_0x#GT6{^JdU{1Ek$m~j(*?u382%a)GSCxT5IKqm1ZmHg#f z)~la4BlhH%jUXIQef;Y+M9ucMa*YV05}z@Oo~#GW8-Nj&s=)w?RiQ>1#z(bx8db)W&Wn)}!JS-d3UA zqE)RE#cEce1N*6c(E*%oVf=SEE?Z887jsQzE@QR*?*-rCeP)b%z-ea8OW}%{Th#+6 zTrA~!j0W3pJC7ny<^F53Zo+G~In$Q`)bv;WYXg)h<|3iWeA?5$+7OOO=>kGz;$J&8 zcIlL9DLXq0toc}j!llRMNj^_hm$#;mk+UP{FCEC}#?J!MpOOh5{$L*{GK{Va_$-Xt z{Yc{U#{H|^WA{Uzv@QJ0{IJ~*Y#kHH7}%9jxI|U7A~hFe zo64whCe%qnW;;Am9TO0Yv-|pOvB$;(CBwP z#KesO4r@ahw(3yRP)Hp?0ei-Hi;{0P%qWa&wkC^9|?u@;ETGD%oLw)VEBsKSs)wcEyHWas7J zy3aw*F^_x7Q_S_VA5Y6~{$75sh`kY(wCFW4+8Vo1Je> z467Af%|LIs5KfutlA*~&hCW!q=xe|pd@vXJfXZ+ML z(2Bq(=HJN&mjNTWfx-8IPMuC_-shET{CQCyGotQbd_;F(7`eYT*{jdWoALTjSol)_ zs$_(D6ngTF+0-xK=TGg)B<3iy3(=vwR%O%l_jSriJ0V=H zf>aJuqIILAtRSMrb=cwb>O!3cR}>qmtHL@WT~^ytKXNV&>K$wT>e(mWUixX z?aFsFzdO+`I8aXgXvgQulozs8+5Lg}M#V+%i(g~tRdh{BP`jNy(^nWp8rFO(X4V+u zB|1*E>S8;kaKDJ$KOGBy@P|8^=sTJxoAbCk8q01a#Aw7f$0*^u$P8}0M<>z&`}Ljn ze(D8K5ZijnrRc>RavRkm)@un7x8!t0Q8-#VJ z1cV^I(Eqd6hun%I#As`yIs!Ezk@}DLQ#+T){~iVHj2xm8Ns5m6ht6j=d9Jd%43n=5 z6=tyorSATLNZ!<3n`-B4-{|;-c3ZJS;cK){z`O~3rtgJsv$h(Pg)5#;49a<2fsq^dkV z+``t;xg`_Uuo2jC`t$ItL$HC}>Pp8R_PUOelz~~ch~je~sS^j~QQ3SY#x103^_;_h z@2TTy#bx0Jj*rg?nx8WDfdAr1;oDu-S%KPfTH#HnI*zr)`Lt@v&c=iclf!1xtXh^l zYtBsyHVd=9$5bA=Eyd6yKHOqo!9i~+^u@mP7gmb48sr9 zVN&U^EOXy~*IX+!v5t7aXjZr)F`TU&+qO-}aL{1LDA(rUR?}@hWT>1M>Oe}ILrZZ@ z1_Hr$ZJGa~%4?1A z;?lRje)~=#bS+!pnev4{|0|v3AbTD}fX~u!#cd_Q*EDqL5cIwtHsZO@P27krl=~s&oXc|6<}>$HtoYYS^-cdfZytb zl%ChIu|od=9yS&t!pjZYEa#^7>Yw~@4Yl7F(gZG8H7=K`hv#^#n>9yWwd6RI%x!z( zwZA^0>a2iv&dg749KSEvGEtQ*T5(a>+ffN;bCLxZuN6BU@hX(L!(cY?tt1+oW}S7{ zHY`KJc2u?3r6y|8UiwZ)#_OfakEi02e|9Fed$(X@aU%K*w+oy-lU{x9&1-3BxbUJX z^|)yRa*U6vn!g=FUZbTkFf-yBEMm-Z0!k8I*aM~E2~dUthY*l0Ek6Hayu-e3;6?Qc zgF{ba0z|g0eVX^8qV=K}+mu`eAE7`s?H3_VZ?TryinAsTPvaA35#tkoN^_cTEB>=9 zt$p*!&(rjapBuMzs4;;|MWXq zu`Beg8;<|~W9_ZuqWIoFP!Z|wlJ59`G)PH`NQi)dw9+XhEVW7_0xJkeuAp>DON(@_ zfYK!_pfpR@&OLsAulvva`~GpBdCqg*Po0<btl=J<9fbY!j6(qqu1}5qF$y15f%*d(S9@CCND3y7jB! 
z9@~}JCMMn6;-(RC-0Plqyt^&Q`4T_Ob#_zvDt(Z9wo5>Be;AK*IN-O_Xo~Q6AiWQy zqC`OYpB+%;9 zY{AVlg&+KEHvwUJo3lxt!`D9&y-PT-MwL8=OJ`q6k^R0&Ma}1RBpRP+fQ~v@frlqO z_8En5e6fZ2l(zQnJA@?{(_%ueMeTW8jsFSyNc}^c{i+(`N-{EX}rsmTZ ziB2Z^oi?Llq_9}AQr}C$*|E4UXYJ*FJcj5qS;U8hs4`*Bz`xOQU-gTX=lMy3>fhXz@ObQqSD=6kuy-*7N*yYUetBTb>B?knW5CxnftnOrXP zQB68$hxs3Bv@HCB7Gu$99M~pr<9(U!)^Y>3*ieGlusBh(MpP4{I)1cLW9AA65DZNG zs@5W>QQ3BXaA9%nOy~lCzLn#&rb{*J-OO9Cs8TsJ-RX0Vs@5kYRgIgUDbT+Nq!3H6 z4o0+enU}hC%TE?mhv&F>|6aTGEzCuC1!5__g=l8nUKCAccb~)SXzuOQoUsE9dZcG5 zyj3%h29su?&gpgS$!+a5_f8+t2u~wFza~58+%uD8#EhP^9$d8)`n_rm4pi!AKPn+P z@6xB^w6HkrZD;>=j)&>PX|3L4n>s|CrlAu;PZyVP&r{}rs2xmT?8K*~dyCM)eFTMj zof_8tuS7oqLw3&=Yfz{n8g)N}@cc#6t$rIn>zgz5UHpHNezK697YBIt+cfv6E3ePr z!c@l?B9h=XxR|(yN(VtUtY|Vs$07^SIXaUCVesGStB|`n20xHYri_>wRh4_#!i`e| zs$QAY++{s>Lxug@>Qq#wVwjbPJPC$3T^%8I{ek!#L$SqaBM-cFDA`Ac_%rM+`gm9o zqxa|=qOPv9CJusB-je5B*bV+3oON6;zi|14cyyQX0tXfB0In$~vUVFpCrYBM=z0|P< z83Q+DXzxbbtM32BF)wd_Wo__?K~<6FHZfN&5&2Y^y66kM16_f@n}F0&9O%B7e-p?@ zYaJKJC)XAQ2ty{7?Xxjc!k3*l`qvsNqPLridC?f(!h0{XrrA+8cG@cabTm)zRsL+3 zM-Li?Ar{=WI7|-&h?+_j&f*?bEL-&_4>HvFtqJ!(Es#TR+RS%u!9vcs?NO)t_fDRM z-1bRen&S*MXpY*blDAd1M|O*GPPvLx*LbD;hwyJ-;JAd1zTUR+l(QE5=FNzHg>1P; z-a5tI+h6iVu~NZZCu82f8LZs_A#G&qT;zYx|iU|@^Sv64SCam0uASVRMw5A zzi6I*@hY*dtscIfd!Cce718Ts$_R@{e(?Er*`CA3a%Qq7lekQTMf@VdVo8*^X(WmZ zF~>F#Ils$smir^D%dnqyGJJ`U)yXq@-i7$A;0Nq0S=Cel1>ou@wE7n^JzveYdET{` zzp;=L`4tnvc`D%YdVW=vpkE1|jn_0)(u`Q`(2bf8HVAID{$BVJL6iRLb+(kkEgxjH zC*m^EJz{EW3inF?&k`@AkI$KWKkjD45|P$kOd$5P=W>ABn?v({L5j}3NYy1cq5Sc9 zs3Lr}lNUj-hliwzE@vfy{qfnCx10VM*x%Ln8b!qYxN{@&UE8;fNJ9@m#u9?m*k}lq z`9Fk!+LR7JR59QnX{;wcRp$OvS)dZv<>K~Z`={T4I zZjB!hx=TP(Er85K7?59HAfLVA8%UZ-QhY$j|8e!WNSZ|t;IW)SD~zvo1Egmr1QK9* z1Hdd{z=zDDJ0WaB9t3d6V8Y>bP8p&<-OLI|10f*F3?ce~@T0JPg=mnw0uAtJwF8_j z7=kzn>Q7a%KX60zzcd4r>~uGgG|EqaU3T~!JBAGps9(B}^drpxK9n9LH$e)#bTEbJ z%h|q(y2_ve%m>g9w=%AS$3u`*`yQ}%MMUBfGj=*q{w^>0na5? 
z=ASn>h`#zLc9y;WMG_DJe9*a;6o9Pk;GHvo@i#O95g99ik&9AT_RS9*#5WHm4bk_T z1{0Ki8uS&tB|adT*Yfs#)yuxmeyi@OzT5=S-3M{MafP zAilI`*?*)NL-fnX=tA_5Ji*XRl7gWP4F{@E=~y{0(|{L$yv_}c1toyrBL;Z*&0`|* zWtx3?@qKkboI!kyt^aH&v4!Z9GJ#>3d;n&~)=MzFThd@awq!wvsk^;K#gPf=Ma+K@ zILj^;1D&<$1wyr30@44D!R5Fg0!Bb)7WAK1CCGyX1w4P^1C4so05%3jtJ*~Y`0E9W zv=<*FG~x%k?NDM*-O~_^f%^f;S+?_6(AN!$AdQBLXDsMRcA)Oj0I(hmfb6V;>P{c9 z#tK$T94ie-QlAy{&!`?CqMX2-sR3|f0j6Vh3-?*J00|i65YRJk6+n-loE=f4bJ;-} zZ8E^d!9$?5=m;X}^uzJV2dhO_2gH5J2*mB+tno|*jNbGT36M`7MJax~bCxZ)tPEmo z1u2p%fn2?ZAXfwK=XV5Z$x1o+z$<&5Y&!DZi-FXY7xZ@MgiOYZL=*mqbU zD;5Ie&x$N(5*6?yP65Vd0=6%IP;Ug}TkLIkux;{vdGC2?h##5b}u`R2NEaqM2L)v^NQ4ea;0$hYvw@G8cnz^16kjnV14` zc#VP_moh;LqYusrG4t)fY3q|A4pb7b&F~WpIYSWq#<>SrEZ*;Y@zI(fzV4oX)A8*D zfF?f&WIV?Zk?sMeNV*u99j2eb&a+s=*Tn+TK9)PnJ{z+G)zy(x&OLK5?7RQ2 z1z{54lshjAR{vEUq||@e0t^w%0=vkle_RBxq)xXKeFb9zs)v2gIoR95Bnyh1uvrueY)YmA zQriOp_PPx?FHg|AQ3+ZH|AlxJ2FUjdX@uCn1tT^BMnKE}3{({<;5zio$j)7Z0U{He zXRaz`Kw@p=fXfp(%MP>xZWkYcK9`jMcC)~;{@?<}&dVIs=F*-b;Qy{ud&5Aqmk5mQ z{X12|H^XjYP^Ge8f$ z?AD_6Z+fx}-Z%ULU|{fl`FaZzz(rB7a+uNr({$j}P$Fz~18HQcOr~7)5&|bMD+hjH zbrW0?{Oj?6t_!$^JXHXz#HtNk6J|fvO4_3efxJo!xMcnpnUwU2&?MzYgR!$F^C7R_%yah-Wi;Vy4qv=u+Gr=0s?)oI zJ~6-_@SnUg$hlg;0Xx7HdW*U#5{Jr;AuNt6*mX-XxCCVtop`~M>y)>^m-(j7^{i*l zmz;M0o|~J?geJ6Q`E~d|NrP}cNpt)xztyl=cFRQ9F#I}8urp~+fYUP+ji~L^$%(1< zSy)(mA5`QvaBPY{kf$K+6G7y#7JcY7ba8g+GyOGRClKu-pYK#SuH9mh_*Sk|O7x3a zVrheUTywQi+>?$14fZe#mmu$A>e{fk)WLuBnr4pV3eKB3F_I*wmtE$()<(x5e!vbT6WMa>N3=DgUz3`$}AQJ<~6YFdH5?qKucz5WUI% zeB4U&E|DV3FoUo)!4RA7 zw-|SA5_W%PDHgK3zU*lI1N9v&sqAN*?J<1lfK~%>k_$EbzI?T#*Ry7vS<(O6)o08cIrFB?=-%Y#@>-L?)327^-gXuR z=1FUVRl?v0RbQV}Hg=ZMh70XV`ojdh&H9IvK^7jr+$a!A+3B;2IRt{a44KTWqM;m9 za?@FRW9D;_dt*$^J?jdU>E&BrR%A~%ezGZBR9-JU=0$(kE*MQ`AM{t*E&MrGr%~DA zo_70gaQVEgd+GcOPZIN`+ML^IAqGTAdWKw^r^d{`+&^hVp9>{eg{Kw^_0T;%W7{!) 
z*t?j$Cl%r~`g+c%_UO{*Fd%VYIz#jmeqoF+*!{z&5@ zGus?d$Ti;b!cmo(WO%kxHIrm`vGRb`iC%tAnjA=H-)f_zGVeGA_X>ZSIS;;qUY{*B z*jW0U?tlG>BM75{kiJ-pk2d$hpd`}*tWyAXE@ArFsMGLT<&JDqZqR#k>?6sPInkWz z?{{sCdZKvWrWVr4ny`8I{aHM{ZGhY7!OHZl}%H#i2( z&y-?XrDP|-6BcRlWT>M>#q2d(D~ZtYmM`;Vs)b&zxM{vkP8a?=_82dES5m?XFXE#T zchXX`0a{CqJCkh~nKMi&=o>i}&kt{Gb{jT#)ySo?@KlmqzmoXKROYCJ5qk=io-FD%_Z{sV9Wzu&%T7= zJ`US=I*6Fb+zo1pM7r4 zgvQRN$OBKVs;8FbuoS`-V?@rSNPhY3!zirK|HePn+9(qhN+-*D zt4S^;MYl62UOSzsDx|l~((4nTY?q-6e|}yCKF#=YN#=>Gg`Np)(BV_?&le`C1C<#j zTScX%$5!tJ&*9GFuG|T~9?D9>%eSE1k*XG*6WLp{H<>LFxYyKVs;MV~oxJqZuJMPdvnpYBgiV&_CBFFydAseC zK5ZT}9>=D7kJL7HrKCR8se8(8N9!#$=G~rjK#(1x^2$i%+c;gYOKNlu_>JIO* zAmQ|6;ueF0)dZqUXIF>Hi!?qMSKJ^qKE2;coE8;zdHkOt9*Y(Qr8RQii|HRUn3v^b zN}NfPHF^AKEwVkLrf+?(auM8bn1X(G6drB+4S||}ht#7_wUHQ$A2bpVLc61L5e%|3 zKWKbVSu#hFmNNeY&lP5W(9}37Ez(FdM7(BBC95=_x^t!_=urF+4|orQx`7=6I>35{ z*{d7$A9=aw@9Qj@F~>*X`)r6IGb=*8Syry!POYHLYk1Xa_|<9z)oO&*YW`EJ5mT!X zSF4efa!nuXtybf!n^*puDt?=lj@0q6HGGU+Vn$hraTse@ZL55Pj%1zS zQFZK!5eu5)QFSpYg%m?lY<6eq)f>D+3c#>;3e&S8225=?BhN{Ug+8EO)!gZLt<$6B zu59`ax>vvzeB4?0t`){}$VA0KyyPzce@@nIa_m3?Wl)(j&|C0>tzRKnOREP+n>M)bi(?Voj3 z0hvRUIS~oKk%g5qor|bpEWTy`BDgY^#Mnim`v`qa*|&q6<#`U>BIQRoGvM6AF61+B zkgjPHx5tr4S1siv#1-3}765?%55Ud?00UQ?cKW{B>kLG`nd%ZyY+T;ObVwza%7YIR zx0BBwe&1e4pk-g1k8lpaZ`Nk1OQ`7lufgbh_Bvv?ih5lt#vYq`rwACW{h!eeywm|# z@&{oIfw);H6Eytw3zo}9IZeVR$wzIVcj;G4Sl#GWx)46ze=(lD3uc?js`LO436;zgbqC3iRVgB&RrQV#E z!8iu zj$5IX?WAtkxAKg%*XELXxSt$nwX~TDv_|U4`=uxVj2*!40az4((R7wSVEN1L!gKxn zDUsud6b)$S2knnQdjX?!VF|!kbe3oI-prV!PF}y6al%C{L55%J_>>f&GZ1wdclM-b zKjCJNU;%*HFAMAF^8T zc%j6aCkA(J81Xm_3ylg;k#7BwiwagZUu^9{A0IzY6O)yK?fuAbtz_-?L0gNcSi4|Y z<@-Kw!QZTP)9GTPcS~=yyf zjq)2-_ZxxL+&70voH+ukpR3q2Q+`ix7)U1L5DVMBXBdf}%%BqZE*hVmeQfEfZ3IUF9|-#tmAfHnIJWK5t@?ipZ`UVl~;2)l!)HioP7zCQA2-Zzr@ z>41jYOO{$ER53r;87|#)LzF5EN;~}BZY@FVQ?md0KUz}6K*NVBEs)9`__dAo1sv>C zHq)B)ujVc4kOU7c#g*3GcYB`ULz7yQNFy<{xFnd~($MuMXfygT_L2Ihxk@wNDF3|m zHzaNu!g^)ym5Ps_zue>hQ0CX}QOWE(W?8W)UW>ktGv8|XWw`)Lo$31hFrhD}jT!YY zc)l(A#Q*fJj9N)`W7bIUA&=h3T9=ga-%zRgUjNCn!d_RuHZ|!mE1D6kAl3S-ABF4R zo;>w^)RVbx3#qLy_i2uBdM5LsbqkwDyTUV@TM{$3a+*s^I#vq#{Q8_S@mG_anMZAk z#`?U*BNG<9$df{hXvTaKn%=#P<&$`p+2s8?F|*ipV7-{dW!<^H^LFUhwx++DaNpsJ zyX({H|4EG$wq(Gci}`WJEbWY>`99}UB7OMNMa!ZfGmVpWbl3j-UM>58b)bHdy-K1I z%1-=i?cAn&$D=LHXzB?{ce(SMs@5_^cD$eSG(X#Qk+h;vAs_9uy6x0Qxb@ZhrO>f* z?$faD5{GrFqMD5Rw-Z3Y(Jp-z+w~KNDyHNX7r7A$n|DJdtlJ!qhYm+VTD9A*^k&Dv zPrSglNO^VFi2g2Pf_E_`6kHTWoEmI}TCs|{skYU+Ih3#uI-Hn0^=fFl(y0EfpCkap z2tbk?M$#I$3q9`@`T*z-fNcWUu{zpLC>Fpd0gMa4?voN5J^1>pFb#mDsbC+1xG;C< zG|_ghZ-7HO01`%&hz#!Ndc4;4H+<=|8DFDdWcPj{gZa}ocC)O8=6bQuARS47k zoA59gR&{XyYDrA{R2NG6_z^rrsD3$RfBlNW!OGNnHM8=EXYyR%4{ThEr9B%uW&N{Nr`*c7r~KPikP_zS$V)h$)s#_B(W}e_V)~kv zl3E$tue7j}-VblZ&I+&bNnfI5ZI`VDGA{|OYA+uLe!iNRN+9za$t?;wQBu-REcK3m zGHf9o*Uh?i_21n7W%hcF+RLO*kOR*-_M;zfFr0Gz?sT!w1#;on7TpqicU{y3R-Re^ zvI1ZKQkGF}$HsNJ4lZNkkusP>%A>bZ2XFiC0@(wQ;_l`#@}~~}g)s8t)xArhKFHBK zbQPcM&BKALGbsC7rT#knX3Z&7x08~V{bPYLeO`FiLFCcE5=67X!iIdGoOUs8y_z%$ zUE<*@)k6FokVP&=MSLp;t2fjv+A7svRO8;g6}>j1hj|m7MX0P} z$@%Nk=dEf4sENEozvZh_m8*|eU!)gaT*tp2On_BJXQk2ktnZs;bq#v@;OucWgi~!U z?eaF-CH|gjLk}*M6ntehcP@L;8e{4GY2=a17Wfg(uYM$S6sJk&sfYS62kBJTodBlK zk-IfA=)#Ji>;G!*pS$9t-k8#%Vs_l6jjvOgV@T#&W!cl@JL>wgZAO#*+r~+k(?=X< zzlnw7&wLY$udDYCo9X#>?tqKCk-m#RDy9jJ>7>~mqV3x2iTev zAaZ91B89PUmqUC^PkHVOM7xMbr|md2nW(U0n zdBtte95_|pt;+XH1uN|8k;t(*4IYTCUw?eQZDbcUT@4ZrweAIC(ICL)ge zJ(<^0!>p#8l;~mF2XVWB@fYo1Gu{%L(0a88es~+dA7ybV-o<_OX)?_I?MG;sLlw#} z499wP4=4&r=>LE_egr~oRcPYu!K8gT;B5h5EF3G@g?H`LOEQZhWi)|1c7KbJ%eb%E zOR8uoSGJjy;_Qea5@`zQ5)F3mc~d1Hsu?E9-%;~<7K!dB9LS(bR>zenik7?nAQr~I 
zp`}fph{(RpDSubZ=Vc^%A9%9Udah`5r9Ya4xXr;Sud3z~xlG2K{ke=QhH$w1L9>w0 z+j}hf)<*9lczF09XeTBkvUxe>b=7<_Bhhj;)x0Mrncsd`+sCzuI>oh(%EIY4Og5nZ zE;?F!*h^JvvD{}CUGvPwVP2{moBK!xkrntc1+Udg-g5l$~}9!ItV=7iiu5 zH@Kfr#PkIvDaan>rdP6H$FK(P*f+U@*lK0QMaS-Ix!22x-g&}q;!!ER+560`F6gWD z#lG8QAw!GV^U2e?-v9R-!@8of=~CsbEY+S&*bbIxjy#|Y&3l2JKf&h zdkS?%r!c(oQnPX~gW&{zTK{`4vR(%;y%UBg>; z78biPRgby(r7@M^l`zoyN{BEH2R5Ff53qz_X zT^jU2>$vz7jPHMP3$CsGc%||dLY=@#kIHh`A)9w+>}fQRL5q$>T26r2j`%mr!g(Kh zjx?9SMQG4?cb2Gj%f*SF^8bg|(I_|aANk~*rmUC3X$W4381GYf4HL+y`*QE{C-7kq zGwoy8ca}+mReu}N@>z|}F$$N5o{=3gxgk`=W!(?H!2L0K>U}OA7|qz`<5>HOm}W`?dmOmq58S~a#hM}Q`MrJKd0wlr+E*&>kG~R zQP&E_8S2=b!P>lT{&;oG4j%9a--#AL{Fn6qeKnG!Y{dl}~)->$Ii zRr=QkNF%7l2E75V!J7J>XWc~}5j8(ujs62}ZvC6BS^dMT`pmD4nk{as1zX&-h%yfs zlBvFRX$%mCaHsq zt14%XX49b1@y~6m@u?32FVvE_LtW^iTRPmZNn{)Frlb3tT@S-({94hn8hsDL;_g~w zwu)DIJG|)u^$vU}Op~jW&R>`4c#>mmHOTB?A~QBw!r308kc9Nf}UpW7U4i;Q6GvEkQNwi#YVVyykDVMr)0gDm7{LQK8!+;w$QLrzqi;W? zv);O|(fyWHfs;y{T{Zjqr=m%Ag==<2g||%ov-b^8woRC4pGuFGPD7^~$BAi)dyP9C zgQrtGdW)yTZ9}l!)2-{EHkJ4=7x!SD4msLEC!8~656dgHJXxG5HLqL5QH!Ls#BH1c z@9(g2QhAYr`faW@RRNlylvwz2Li1P`ykYy`D(OtWO`yF1o8Ig|C3*Xyj$>7a6O`r|Niw)(LydS9iBL2JO*s5&);I{njgQ7{Fb2CyLR5VjKv2HzH zPLhmqP_lDb>wn#k)+|5&7x;j{;@5wv9>27vzL-G8A4le!zE<@1>qKYPOU|l2BW83> zZGA>U26HUVIe2JccHl*7*C@*J-Cu$1cTD@8pVu$*@)idTNRKJDzes=Bf1URmbKan( z6wJ(}%xLE}j|sI%*{yCUe7)c?vKcdzx#8jLW;i-Dxso?PpV895i7_U(C?hB!tB{>< zte5}o=@&b$5+98GRS!IXbD`;G=hljoLM=%+lR7TkspTg%pf zAW}n?b!Yg*DV^-9f(1k(G&zUuAo;IH4eeLV)^+U!B z?D|vnT#M#^-O!{xZHrx5WH6kEiZ$Qka=lTcG(?BTa66$Ay4Tt4*6sVaq3Uwz!)E^{6l*-uopX8Z z^ETn-tcJy)w9ItKXg`aEtLFItelVj~xfW~?Ut#tez5@b-!bQAGX}BK~ z8?LTFtH_nVOihJ{j1`d6)aVrEBKGV&V>*bB4Sx>Wxbv5psqmIKs5d_HQ11bY_cc0I zxQNNA4fheT;qky3PyR9wa26-<^?^pG0T*#1Xrk5ed%4)Du}{b4ofaG3NSOkAey`I8 zsL>eie~1kq0&4hpTG`QPh<&ZzMyvsI?NXh(grrRxx#0DSXWe&{?nRzLSUzto!6o#9 z+F42AWGlB4TI{}WHX;i#Lu(fVLBY^C6nuI483hGpT`ckLJPeuF3*N>Wme8Ff={rh| zD!bR9%Q3pb;0#_No@|p7i?(!s#+nl{FM!6u=+fW&HeeQ^m(SZZwnA zQ<}zKswliX=4~aWWiNPUWgS}Fma@6W+s{wDFP)}%R?{W|-ddpQr99{ShQEd$`(7qy zY$ZHkr@OOTH+*N8`E2;Bg3l{&gH_EFRKzUf>61m5c?Q^c`(An3b(vy;yUmER!yNqc zmz{f%P(wr+?_6H7c9-28uZ0ilq=B0VMtVQ@j4`oSMV4&~?V!Sz`%KaBYw@+lh*Ja2 z(q~3}8B0&ab4w=`E4%Q6qfaiqwdN+PGwhy>s8Hk-_86<)dl<|%BUawx2{}5jJs+vT!DWZm%(9H@&6MF@H&hTy6|&9c>UW6I zY66O!#j7&Zy+hiq{@^6o`{hO}XRpH8?~|6UyNr9kcHCQ+@}dt^4FfKde#Nt}>oo3^ zXi6{}86{4$D|I{z$*!BkqDx1yem{y%oGu%R@A_AXN0@#Vf8H(eoNsDSi(%qYE8Pt* zp)z_`Yf3DaWg(j^A!90Vx|S#F?QTl$Z0%hi#@gd{TiFn^&fGTdWTn0v5lTg;H-jtt zTTbSC;9u5=X5EI$6r6|By)yn=YAka{)vGT?NP9jBGwZx3x%0l<0`)}bp>l*%21W)&5GEtm=q^)519Nw%T&Pa9 zN_Q1LT@$xBFLCmqQ?~Xhn+_-&k$M?D;#Y9%Z=WPSGSVM^snmirOs|mTieyCmb8u6~ z?i%)Jk!jEX`4)Qyl?8jxo{hI+)urz6Fcw*_wVEu80U><8x7U0SdAPm;tN{CM*{kR9L#ivk4j?0 z^~IGRz8_Rb7?#24bK6YAir(qHTy#Oly$fKDlK`S%D!c`7C^-ube@PV=q)B@uod41G zcDtUGNdCuXRCuhP;&t8w)`1F7I$q$9xrXW(qY7Upz2PJ2!Zj+Z=Y^t%4YA=3RB_GT z{jy_?vEkn}X{AK-Kl)JN<-`jJSZSzkaS^*R82)I94POCGV)-9~fJ@NC{?3`*$|gTd zBOiV{NR#)Th9SNNZ2>rtYtZtlY8cXk8qT{xT=JoO>epR0#k$L1M1<(%MB)eW*fWnw zAtcz4pct=1z?Dj&tmqd0jslhGi0-O2PBhkk5lEj(dplRt0WZeM(53UYxv}*>5#v7* zJ`k034m%6U{?jHWN9ETrlS@m?sgbT|fwc^56BPjFZ36kfri& zYp53xDU3aW{S)Q?6A2D?7e0;xx|Vk84}{Etxj}khZv3Ap`X{CFK8S?@<$t;d@1ACZXV1v`j`G8C!z>tMwi`6KEM)Bu(WU`=Ql6on!(e2}jZ zMvl4bDukS0H}5$VeB8v!p(6;?^_xa&sSV$ocpgquYQolJ3~uHN)XuNFL<sOc<`JI2~XwQ7)qYT=K8t~A_J8P?&hTwpMDzH~_#>ha*` z++PSraOHy)yXFbKI`ue#qwA_nz44Rx6Vn5RZLXntfzNyR*SjH7&a)z4R4W|4B%$jN zcq*ja-v0Lz-a2vx_rKn&tLuwydMGrv?%_SQ{fT6D9o1a8!=BaivhFPO*x66{ueYm8 zS4!Et@rjB5$WZOYB1%if1N+a+RUc%Uf8$7ki3(mLnf%(6>PW}VJ(#$2T{DIa+(ipX zGKT`;S!J9Le66Zym)9*nI_PJ34f#06pmU|Zr}-a-2+sArjQHI}_4uT^i%cHBTStEN 
zIcd|=s0U45ZY&sMOfn`YRR%3gU%D3HFBvG1o7uPUxZ!5HWqtVl;|4Va_z&W$c28jm z=%b^qh`qqYqELm|Aw%?v6@@nMl^C;uMe!TH{<0+Dl*V-CMO-4Q5g|m7#lv=E7|KeK&H>b`Y?05b9pFBt3e>$Q?ZlOI) zS@EQ2py9_;*18|TNS5z|6lvcR|C5evX-xkqQ%_VJf#M-?KUQ;n?yU%FRMfR&s z%VX7pY9it3>T*y6U)-C?I$M}U?fR0-NUmdT{r*x}{aJf9XU%@&WAuhOSw${fwB8ym zRoC>z*i`{joVRdN{rK{Oe>Ayo%L7BQWRJ1uz0RydRn`7);-{X?7tKEF+H|RwrSb62 zU90~Z@vb2--Tmp+o56<4^gZW=!bT}SgHfSl?Q;7+O@o5Ru7=+}>MIHzpTHfp|J3Vm zrd+b9FKJB_w4;#I)De@;aSWq#BIkv!X*&w;xQ}V-U;A#%;pZg&#HV1jW2q^rk*NzU z_n00I&vYgfx_h28D`XaSuwv9A(iW(S<-}#%I#7z|(5dz|iAX!58`~}lKXpnbWL44O z@w*bp_;K(lQmrp3nK(yKN5t=gKog~4=MSPho>hHqo-f)merf{0U%dUQeYXxvj42zn z?-Qne6cWp$(HBWg)xp8b9iW?gGyXF=aRATuUG}{AgPZgZ23b?^zHyn4iRVRX_VL8A zR%u)M=?cJ_o=b%DyF8}$?Ta+cjJMYU7MNL6Uc7AOsvZ+Rcm@pL&Y?C1)iEl;nSaXn z`y!Kb75G$c#G^M72UKlUZ_bOGr=r{)3=2b$Yk2Ydm=x9qss(B!Ec`rUN%dDOSw0A&FX z!skEld3<7YD249w{wHUHSDkOPf;tx>q|Zs72QZEJv+P3?N>Q$NqIY>TT4U#cIYP#q zFv@$o6yKJ@oOHa^kx`RdZN*r`|tmQybaFS0cP zyQFY^g$(!srBAGfo@m!6jh2oe{q+~TLMyAG0Doougp^!MvH)czR#BzP{PM%|!kY1T zhh2S_LWP;6IsF32qlVS|C+S#5{adQWpStT=nel?={!Du%P0W+UmBRGN6(33+^)vDv zopB&iNP=w~p^qug|{rH|C*^W08#o@HO&rw6fHri^0bR*VRRl zJqf0aE#S}vPgL2Egbh|sHw&}=ul2&MqW_AuR9u{80DJCQy&j;w>*brauc9RZ&#g7y zJ?7b4@(of}$%i%h`vh5;m8CYZZHlynWPY zpkJ~g$@Ulizm{qx1`csuKR#4VUNg;6O-H8Oh6c@HUc>|YYop612KA*Dd~#)atshMw zZU4+*UKeStL{m4Jryrxbj~=_3+%AclbS;lwoYSspj*;A+j^Qa{k?)!!=VIE~DVGh7 zegfI6*LN=CJnbUmt2dmJdD?w++jVuh^l3`i>90b(c;}H!q;Zx~YCSzR>nHsZ1D|}z zJkxm-qLU@Ng29UGTw!A7C7La{4L4`!c^YEgW9zN!mxu9elJ|DZ!rn~^Rwgj7DnE@U zD=Y0?+|KWT(HpLkPySl5vX>6il&{!Y^ZMmE(N(Y;1D^lqN#kt_8k;GLQW*HTFuI6s ze2^>p@?;=1(*7@_)6#})efLLA*OjQQOx`8uUp1liG92~scVFz%di?m?yt$lrUB{U< zSfW1H_%nKBxU~I~gm0~S|FFT%d@H>~O1l%R$xJNL-mxD_hrq)%V##sWDpe0@AK*a- zamUSIQq@37DbrAl!VDDia2p!BI|HSsPD3rVc^}cwA^6+?7Ha6ms>BasRcd>&;GXEg3pMOE%9T zn}^`21CBZl;O4X%nkw6dxOq>YDU%Z@6&wkSPoViJDvgN1jLW;>FQ%a)?QJL>mjq)6 zsdNrtkw6#=e(bfmPf2|V-`yg7_6$-<4ppI|y@V3BFXM`*A$Wf~k}&=n;)uKMXiT|= zNE5E%>Z#YzjfCrt2f(@3a|o&K0NK!)QL3W zcsl5Cc-j9|wP&EiAUjn6l?Nv`APb?pHH3u-FT=Q=f|vGIg}(rO2t0QWVd3??*bXqN zNN~avzl4vPs`hU~7OvY6ltfu;k=d$u3U!=PjgNvVz1lAxq%`fGIU}&fDcP=TvKJ9tM-JLv$3Tn0Xo{KvXCgpFN zWYSMPyp#Ph$5Zapc3n=BqqwEVOXKDsr<|8NE6;W=-u<0Tu$(AhK~|)ChSg{{OS@(I zT?V=h$baeZbF%v4*XS?&ck$=<@#{g8&jjj1ZYn{v$@4>sMhgtmQ#FZ0Tn;Qu#Y`7z zlQn(lDIY1{2_G3kgwOfn4<81Vs%1P|31S zi?QYVrFQSG_~WsLk%D|DgS*w0oO17{q-l?&Y^~ILDa+jiu0tPKPwh96 zRJob?B}_@bokwjkKYKDgJ|%Uvvo+z*DkzB^K`ou*YVKXEt2{WwSi5L3 z8SMqOZJ?5dKG+A8-xxVKJWYqsK6Yo|avY0{#QK>35vDo9c6~n`cClG`Za({*LU}@p ziDmoEtv($*9Ul!HzjCvW_8W90TU46kT^)~BK*4l%P~zBX-h6lC#EO&a8)+~X?(i3W zwSNpAV4nSB2sF3|0iXP8k2C*Yodi_Ae|sCmMK=7Ie}&QzLEtz}^Ey^Q`_{YITat5e zx|DH`?sVm7<4MlPncb<)>5p>yTBS+L(c=iduRV9H#81N#X{0EAt2A9|w57!+~#6eX8 z6v=si4d?y)8m^$YgW|b=f5DuHFj<`^Kf)kLzBZeXLiz97un(8=+2?f0p|~4V6OL&l zT#^6RM)iMfTL0I^@jJzORD?f2(m)4kn5+EBGDz0xeS8jWPhxC_rQaJfx57LBH%K{U zb6HS`Iqc76*~8^|8@Mj`TC9*kF0!NJ%1B)ND!}cBSUh67@D)}cwueAA3ZA*7gdZto zEoCH4UN@fxpY}o_S3Owx;z(B8dFKGU334-UcR6hQf2ey8e=7esew>6N$&$Mw9g*SN-O-RI<7 z*L{1OdtSNX?Rs91h1I`|Zcsv}@~`9PF|NP+6Cq40hR7M8O0e&o>=8VC#k5uz8F~CK z$9-j4!BrkbXkcXUdL^>#YhHx$yA7`JmKTn43iv@!3k+k#*(;a(7etR61eS+V!+xJW z(p76%(~jOv#piLS`5dM9GJi_j_L$K$ZLB#ZKzwDMllM`p`M>E&?J_H?ol)POxgS|6 zOdX5PahxkloUz9t1AG}de~<5`>YSX`CB~ZsEjfyg>AgYesE;`s<}Z@pdETiqRitsd zTE1{}q$tsdId^)!LfDh|%_GnJ9>)`YN_Z(@rVy6L_#-T&qXQ1vx33b*mO>l|^Fobj zjSlTay>C=?j8cU)S>JT59OTMRWTGrK{-qD!*6!8j+e@VCU#E;d7_5@6#RIJtw&2c?8UR#JZ3FzAj0%%l<&AXFz^Ba`r{{ zjc4l(?_ZCi3|S2z4m9kWe+!TDvnd97d&+3mNr^YARhBZDv73{{=;RK!i+FKD-o`Um ze;o7g*}vF}ag^5jH17}Y_G)TkpWE3ip>0QAE1dcTD!J=@Ab8VHdt1K7|0UxpmtSP2 z$o0q#W`UT2 
zGu2=bt#ZAGG7*%Y-e#`3H}+sGZx==Dm&C$pzYJiV7e|smg!#d8Ow~^ImyJ;ObpM&OZH~Kyxy< zPQjJ9@?ojO|wY6@QnC}t(U{p<(VI{Lid zqymaez(#chwo98dKfi@e{Y`tDx#O;Y;;HN75Z(}i0lZj(0WMmt^a7*ivfz6r6x7*$ zqTi&nTLaDM2{~0OB!W38fAMxGQtoqWiZ?L(@tMBsj@gd*YU2Fv`a60B{BlLP9GQN2 zY`%$vorw(b;?w@cr*@0p=gTn;0R|VzQC#cGPo&W$zfQZmq9<22;}1_Kt~$uZA8nM7 z&3K#>p4p$EMpz#6p5%<61;x?$BjTBwsD^EXvHQWtjf=n{bm}B}YKHq+7kZ5->DCNe zjnRPFJ5hbT;=MI=V3D_(Luy0G^*^4|U0So=4Hsh5UJZ?t=x**NLHLGVnTzJj@(hUx zF^~9eHEmi!%Q`yX@#QpjVII4|K~#kJt(Vww$$X2iznt37k(Z`qk~yigXY6gyu*t{v z2PaZ$2g2$~Lc%tR9ooWuG5H5Ua@F5eoEF&lS=r!Cu;CsF<4lVp{XZkocKeJGLlWUv zf+iC2m3M4ObzXGqX?gx-P$Wkt;$-)^jOWc}V%p8ruP)zh zhSZtG?GfL-mvpi*4waUg6j^8_)CBB%z3OxO%S+{A#jCCJcCy^nwoY*sU&K!-=M6h% z<<{)WMpdtBhEwSeGY{?QZ(PgeyPvDkb3OdoCjU~ed)__H)93c1RxQt$mD7bhEVd@{Rq zVtkdHw`a|&y(@%v%*AB#Re}!(TE7uB(792@jco=j8h-QMcdM}hngKchQaDQ%1fiqU(=b1gySt3v^d zMdIEnNjh^>>`sPcEQ7s*(>U`v=!?^$keW2}2>ZDQ6FmHXETwQ>J^oO7Et2;-mTrvF z=if4v0U;g?`CAk>nYV9zDekKyqYMo3h%s>?>v*WRkny%zO046NvZ3?KNY~&PPlnJN zzo6ntuv~}KhZ|}FMhtW&j5v9$9DO)ny55&${QLEo%!Cyx6nLIVCxmA6ns7kD_)jgX zC6GN4oBN+iR(~S4FepU>B>`msI519nxG$Hn}_Qf&@Rv^Q3&R2Z7?>_ z_1Vf`0w5wFG9XHzJ3#b6Opi~f=p>j%uXu&s_CFtH6K#BX^XR@2;Ss0NEyaa<0E9Ca;=xXLM_Tc{>|4h4myDp5 z35Xep1&9@h?Zz`A^;bTjqj*@)e`($R!Z7?3gN%m)=WB5NGppbq4<1W%2FfZ1axs=r zGVoAA^$=8b1#pAS|fBI3HtN!JmybP$t3Fv0Rfdv9v)Vxlm;b=ctlU$ zhRS|mPk;GB%18qg)Ltqe2L+{<3R*zHedzlR`WA$NumMqd8XZQX<^|W*)&as-riu=i zmQvs*N_=MtXSNTC@R3Os4PCCRd3dG5S_@b7AXIScbRY*SVj`a8$X^P7>xhZaho#Jn zK(sIJPO&NeH5}yN8tv8sL*bU2I@LQdMORg97oC=mO#=;VS4Hnl1U2zhG-4oK-EVCL z*>hh(2_r~~5Bolwx?GGU;aow5oZ`4_bX7F%{&c!Baev5X*`4dE!L4>9`?OBhS!sU1 z?X$3=Uzpr%I=5Zqc4X_Oc@uVDC1mhzwXwzfh?fjuUOY4>+_Edbv4wX^JR3`-?W6f| z>f?z0^E&K0VK1Jo#4~G``tm=J5+Q_YV+DCtEEnuD*?;o%?_COCo#@y<7MHJU$be_5aS`OBi5-B_`^(CJDce zilQTx_2|NvW=|YB->)^7)Y|*1kS?Ft&V9x4kpFXHYoR1FJ0Q3-e~eNr!uD_y9|^Me zPv3Gg^zPXI1(qJ5ye-$m)rLL)P6U_H0){F z|Fg5f;Zo0WSS_Zff5fy^KCeet6mdj)6y%6(5)}T3+aMIQ)?E0>ps6w_=897sOhe;9 z_*(N6C&FhF+upOvIfH1+-%pQ@mSzj*Vw-IwD5%r^N{*Kmme<1#%&mD;wV3Ft6W-CP zQ4#zAG}X2yP6svvX5@TL*eXJ>2y*ql8kQ3O#Vwdj#ZFFpVoWl({i7+N^yG@^xPZE7 z<3BqN!EatYk6(`X6Z%dCmSOkQ&E2q#a~krQYO}%Y(HXjye1DtN0QF23vuE0f&o*39 zddm%-`EjMkRU4-##q^r(2C5OeXZK#XQ_L>c1OAu8jQG(PZWrK_(FE@ScR{El9h;_!K=i;X z^25k3KR?F7`5S_7jcE+u(BZjFm_7fRe0Pa`{d;2Cb~UNUuj{$m3(T_XpZq!YH=V{T z(#c`6$`E-KIYxx>zGGoH0g{=x*qV8;RNaAL3yNKKzJbJy_0bz|GKj2Q^b=Eiu~~5r`$Y z?{;%?Pw|GW{RrvoQj#TBWmN@!)nZVV@4W~w|79EgCW5N{#FQp0;kch`!x!`N6MxjDH#CqMi7N{LJT=M;h2QI=OtDdO6`BVi)T*prLr zAx+!5yjafbbjg&%Z}QSFj>mryH- zYH!8^PRDaaNZb7if`yR#^tQ{95D2}t23Vj47=%XGJqCcTcPm0;!o+UMw6!rv$JP0yy`Oo29(; zhVSqRs-4bh6I0ak`d;#xsy)~`Farn|hi-!8Zpz4bYB%MIAn100+iSn$hj$4i0Ox6GX zl_w=Y5(Pl*B^=&^1}|`@?hoVH$GUe9fiuSisJ4GKi&_Gex>T1C1Teu4j!lTbNJL73 zlN$o7nV0SqF|7MI-x(*UlOaT~Y-(5EKHANP>S;=V25wjk?h{x7LoR=0bfd*}=u-J} z<~QyY5chvKgGmQJvU*hU>?J32f)N=-5Q-#S5jy^i>830ecNM`x=M2_Z{y}~FYmTkl zSHP@x10>wM-4>1Vx`tp$&BsEpcq1@Cz>vyKxuF*iR_iG^lk=RNFxd{~qaj9|dMk_j zFpA*u$7Mt)SbcVGuH!&I6o5AH6V{4Z>Q&%qap%x^g$dqwUM_$SDDs(*bDn z0JxF>1YAMn$cj6>f4PobMuhk?N&2_i=kP@ZFT#od^L8t^j!507!soY_EpR)xMv? 
zVY|WBYcG$S6319xAGrN!1`qUms~f(IApEi%bL9uL{n!9V^8@&EatS%T_Ms&-d&x$k zxQHCmP6B`laGKN`1CbM>1-`_HVggWIN94E^1tHbt+a9CX3SsNYD7K6FwB;BdD|;U-cTgsw z0D9!_19W6SJv%12WorsLEa2>B=~w}bV}Uk3SahCL&|&e$AE1a1pT-W3Q!YR3^XrHk zh@3%MJVeg>LO-PX-!ky@CkrTd%$Gp1?Yqr=wa)T-1)3xj0Djna-Frrb{RGAKu?YHn z+`Z%up`#Kla7yt|*R2f2ZW(a)eiV2Xmja&2!o8qBj|e~q92kH89^ff-TQ9u?*s=1b z69CU6-J4W{pnrG~l-gdz`ikhoavHq>-6e*gJLNvqPx68z3XFTn#5T}ag9D9IH8(Ie zkT9CnzS{uXwk)>XuxYb2@xY|WrE!t#Zpw01Scsgx67bA30+gLUge87gesxluoqUuR z_6Oz5n^?4u;HXe8U?XyFSYN_BFcGK$6BB3vo|m14`IK{mgNL*qacCR<;cNvf^xAj) zZU^|P53n)~@EIF+MmZc?UZ+QU$vX{#z!e#5&k~k`>G?Gn^86+vh6*P>B1cj55;|ZB z6i;AFCV1Qdxb!9AWa!nbITn~&O)bDd_6c)ub-@Ph2#Te}ME!+vcs!?pUQ?t1888@o zPv}~Gc~;ze$eRu>eK~^-+Tn+7??d4Qdy|t2b}8pHtTX2-Y$?u^%es81^ANq}rhJ^q z37(o$@8!oXCRko6{t9C+I7+*^l}8Lp^5y~l-naxufW%E$*K&0*=;%I;WiW&5raboT zE6G-CwjqWM6>R3WHv^ICVr#Is-?Ci;u!0lu))P#><-Tl1!;J|3KaZ(A4psloh(Mg5 z`rN_fP;N!Y1jV`{w79zuW~Q~EI0K8c81@XH)2q|BksS`xKN~U3qsM{nO}Ljk6t_`Q zTwh2JmR-mL&WoJaO+*fjHkhWV2dgyHV31~J1Ir^FZryfvYyf#b9?uRK!w~r3ymXAg zJ!DZ2KIRtJ#-TVO4%>-B9u!k31ILyifR*zWP7c>J42ergMqK~+-~mk4)B}(>eE|}S z2LQ1l07Di4dSX@EzU(BwZkKKu%2)caD8U zD8+-oO*uzVN?bqZ6h5Wne}K)-BiM;t>Tq6qOkk0f=|GwN#~|cD^`bv=z{L8pOEN5k z{h=?^8NP?QPME{a3b0^$d15-n-~%&$9zNW;mfd^pYxMGSFqgrbfyD&aC7qUHgvmB{ zp|OzyOl0uyB^UW{0K7C=5z_x*2?9T9FTwB$YBf}p>)P8hSS!svU1%+Vb5k={o3`x0 zMEnJS;xTGfo*&$By$Sxs9$a#c;81KktS5&q+`TmqifF4mv_baR9%MrkK$ff$#1zE= z>P#h2s~!iE0Jvc7OpE~Qzy|g?{%05;et{_Fqp#fHnHqeH(O-N6^6bLEeWL^Ck1qWw zPlh^+OMiw^K~opLXE$%0&)LOt(&)VSIUoFsR;fMu7Iu3S-OCWTy*$b|P2YdMyh*%C z>AR@dNHeEGJ-a-6V55~qN+tMs{nTm1`{?BK6zz6ldU6s`DTH1dD$K{pMmU`tb=1Ug z(-O*G?5wWG6d&j-O*eX*4EoyCMG9qX4rMY}rcQcRssm-U% zYNiysNq!~DVfmo|iYQ(w!)JVP`gcw6+BY29?Vb$8Uwg4%smE0Lfjnp8&&&I}v8Fwo z`OX#_hHm-VEaIO^J&EUkUEG>~lP8o|8EbdInYn~l$GT`u`ll;eJU(tjtR>;_-5s{Z zn%)HTC-i^ky(OTh5Z!vGB%o$AYqmygFsdy1K!Yj`t?3Tx* zATT74y{UoV`xq%tanh%K(qXDYY5wVZm*!A;DTQY4qS*Sq*cr!m?LYoZI@0bvr0SKN z?bFS)nMZ^5vHvFiJP$QYfL~SrCaGVU9A?Cg6Qd+^*|e2nI?w(p-=-n+yg`^FuVb8z z=!fBr@I2uxL#c9;^^CkjbIMo9hYzxzPKdShyH_e*ziy4x%YRoM(@TS5uyChVx5#n$ z_%&KzF84c+yIs|8ZA{I)pv<5M2fW8%I!@2~?Q8jS>4j(D<5l5shab5 zakN<&PE=nWHYsDp;EkI5^ka$;Jd=N!gDH>y@8RRz?Qy7QxU9bl^?8@|6vBnAhnO;C{R)!h=t^b{2_R>vf3W>Q?FI?z zr>iMeq{UHCjzjY=K6xR2rQu5-!lS*Yi`~~&WilP|~my8@RWxsxXS8++} zRuN%*7HTv{nZ?G8RL#3&IDqLe`bn8}pZw(6-yJ_Xn(yJHOW5N6TgwcwJlh}oDM?VQ zS(gkI2})pVhGO+&h-u6m_-7st%->5#fw|-QDniyB%n!DKE8dEa?%_RVq-uK4mwrVm z9ZONP_BX!U)%!2!C{?kTkwl0~=4u{6-&H8QfI=TOFeaA_4ls_F3@$JMcZol~{7I>L zouThK6tgbt@qj76WblE3C-77D-2i6jk|6+Q@sha-%>Lsyx6WUt5vtx|=)1+7OhiUc zeCHLIC5AS5`aY#92`~tHVrhC}rSs$OlNYngn^rW@2f4vyE&<;x;%?)GtqxRJ%emh$ zIUc{@^w?|+bK@;|bKH9*SaW)4sDr-ql6b8V9n!oCaWuus#mIM4D{E6V7Z+X)zTAgs z^2S#|ZZ`ik&*BZy!P(t@4AVc(8_)Yk&~+Rao3ZDOYop6n?N;9-xQtW9*V3%eT7)8- zX#I|@M~?5e+lP7rb8?J7LS&(_O9NZik&!l~|HFY!R^9KnNfq8?6givg2AZk3x3*eW zDDvyAb7fNt#Bb9(X*Sa*t%crsDwxw8Cu9}8i`LGJxVORj$4czpKz8TR-U4LA*^n45w`FXtcQ|QZapdV|waVgHvG1Mt5MK+xL<2 zwbXwWmJeJK>;D9vR6Mmhow%2+`HlakwTVrX**E_CS}oHB$NIY8a{~U|KZ}^q@h}{_ zt$h?uzq9Mts)352)Zuoserx}#K5hLssd4Q(-;H%+CrXj7J1>JRY=6)q-XsP3rhNr(vJ|0qL2qi^gDu(Cbm|>M>58@y zW07a1^OEC8C1HxXwegWSaw;k3&PE~6Wtn)R``5Bm$igo>dUymYQxvKEa66P=2e@av z2g(Hc1+)NkHPAhS5l9N?HPCyYOrT$ZmKh7JcJ%VLRRZ#MRRRGhCX)7>hfl8)9zK-= z3IS>dx{7i5lo3b@=rz!LpiH1&Knp-uePtPa7oSLx21}8D!<4#hYa>MkL<2+%bQg%> zE147{T$zDbf!KjKfw+Jk*jh>PYH>+P`OCh>0_{MVK)-+%fUaHz>p)UKuYukJWdi*I zS^&D*(is^3=NMa+o>==wIngV6V)GyMMDKuHe-sk=z8bp~c{`4PDDsk_l)6Sl1%+HF z5K#kDohD24gPyoOgf;kP$UtOwcqFxkv7n9jOjsRk_psjDB9H}oJb=a-hi!x)It z;f0QD#$&x`G9xF2=t>BoMo-I3neo>wPuxKt6QGxu&pVD)>T3vA_Lsb>y{j*ZB{s$B zYeddP8pqbq%Lm@u`$Y5JtEY`;(IbuT8b?QwHm`9%q2Up#jpOL%nKp3<;@00r2gbWN 
zH8XuTwc!^&6x8KdpBYg@d@BiXvNdPg|5P90;G&56B1fa2w~8W0K9DX(e!79n)vYi} zl#uRIogRyvLB583BMoj$ruI$MY;m8JqvLmG1LFL>@rZRErw z3+r1pKVpiVOz||yG>YRKtsq|dL`>sy64z6lNmlJfip(MC`%nGIw&w1ax19Qu*)CpQ z&xRiRF)Ed1*0*b+~qmn zIV1G8QFps&rSpFnABuVyA5EKRSz+^VWM759azdoMYj;OMTiNi_ zWRO^7skMAmX}LHz5^l5FOV}yTYG|(>g5;LyQ9p*@@1z!-*<>jZ3=xTDz7}n!DW3 zweYvRGOy+BQwmp;mKcn1M zc=%^xnsMeQWaJiU^pfS{lxuRYS~AMM1>t83Yi7&-$$2X8c(@<^*XkeVkm0zR z#eg@yKj>^!q6y` zSuR{BX<6T#Rz9{@KIk+ikvrA3uI3K$QvNMm(vAs?(lR&Wg@;$~IpT#z8p#XB8|@Co z37c*fwzS_VuM$xz=c2GQhiA8x7l|mCPiZU<)u2W!5pN^PkmHVp@oviMwKcN!x>@qq z|BPgc#rDX1)*mRG@7Tl=>ubt}L@ddl7c0eZnm#mdim%V|O%jp!AO0|c!+krCAYoF0 zT3A$`I<8mi)ZbqGA_3KyJFDnYKD*b>$3Bz9`iLRq7F%R*SjOuoneU%)hezSPL!@pq zN8uxcx<5KF9Q;^AmOof_!?P4e+>g6#DKZCkhh~qCV{ik zPDKBOMJChd=!0qaHM_@*evehG*-)Q}qcQtdbg1Fn#m0M^d7eOviE?&`H7P&^|`k=onflslD8XrGi48r7D7Ab zjV1o5uM#ts;<0R{%h%Ocq%7cK3DeR4u3u4O-K#xi?ft4-6!jHlcj8`u`an9>aX`$o z;$UW#fKPnbw75buwV78r_4^jDXVGuk6Ft<-$ntfIOq9Jz2)|*U^{~fjfd*lFkIoVnX)OgJ4hFE*?!?UgmPNT z|9B#A=s9duFXU9+&>EaFoR~(kN1sBjWBKCf7Znw6hmMFbiKo`~Bg@g|h<(SRuKu7@ zs+{6ln{N%%t+Xd63KMJ8U1pg!k6H_C24{5X(kjXY!Yy7}5vz8O59;-ecfBqX5PVZv z@ql%hoO?rCclx4->bU#5Wqz}JVWIQevWl_2!iM$I`4cBT@sY|(>t5$?oI^5>3AqvB zH2Irare7M=p9y*@u2#UKBMJ6WzUvmev#aCZP`0#;58E%;mhmgpcaE!c($yaw_w*95 zjF5{e&v9xSSEzseqV~ahq(@_Gb3wX8w@X|PNYU0PkzaS?ZaJgM7QTd zAK_7dI$f2Jfw)=s^R-#*i~>n!7M-A*5#Mhr1eY}!U!a$N?yWKG)19Mjtb$WCzP2u& z)Z&pHVs>e4V3Cg4(|!3Idx|kcB(3(2?#owTFK+o2^2gVx(MZ2EpW03_c;XbbGp& zU!Yc|U=sIpI4g1LDF!)cc>Gvjc&}AW_-wSp(Fa<{%sQ;RORsE-x@;|L=^Z#kJhT*_$KE?u%z*g!1YTf>m{~d9P0k^s9>4XnL zz?9?$mR_kH-Q9ec6{r^ap96CLIq(*thSvG;bYh143;y^yrt9DIZn`7m>XA0IQ%v833qkI%!0pDg z%7-7&uD_s=I<(FPUor-{zde}B)+@Lh~lKEdg#rfA=`xs4{d zems6e$F!76h0b_?-n}jUu`T3c?`X!;AeSqOx(0ouNQ=&34B9Xhyrsn`;(&f`AjB1Q zQNArMe=*XE4!&wYjmDo{8daJ1mclCzM_;!)iU5|p_#>LgyiVXkW?>uDNB*{iO6JST z-2W=0#4ZlK-Oo02nEr15KAKq}m2>l4;PyP*^wvPj{9>+4xoBJ+S@u3XK5~U;W;#ou zmyfE^jh1Ww3SSVMCfyO(m>FqvdTH!;KHI+}F!EOAUlH}r7-AvZDI%-+p8?avc)f%04maI9+|$DbCVm*TCErw{RRz zU0PdF`XJxp(>C9tb7k#H4{l2WNs2b_4+)naZx!Zp6oa;EW;`_qHNPSq4-qc)4 z*{k>~6k$4e;XICPW$LDEqA&e)UbnL3wrb?*JfARS)7Zr_-9EBWS0G(m<)plDwP>GM zPDUCis3Qn%?A@xvmG*V`QQAv7m4l^xm3fQdI`K9(x_p>Xtb?G1{-~ZJsfR!^bW}9lot>HQ4CG_r%J~oQ5n@Amo8|Vj^PjO*T#3k~A{! ze(6+Ynaeu~E7#LZrky0l@}WOUal<0yU#$f0s94p$t^GdLMUcB*=wyo9*){l;t2w1F z5=&iVIOLrZo<$OsdQAnNfMNMy&zMN=J3DHH$&_d1oh^BBN019Uk|NDHrBRzVwcHs; zTR}I79IuQs_$%?^qj%(Z?GV4|lcSbFvDgEVQ@3^H^?2_9zCOoy7IPUPJ#Yvzb$a%k zZXWT9InFL+_efYi)y3g{Hbfs?s}*?DCjF+uSIc8psxk5X<9-*^3}Xhznc4$9U#-R- z4ifGQjP}DwgRzl97378CcZK!jpIk%99U1DI&adLfdwyo9Pt`t1(5!rrAo%YmSNYcf z{k&ZK+UF?&GC9m&|EY<~nZ$RT`h`p+=>Nr4cFLW}E>&1xeI0)y^sTC8EQ?ENu`kz3 znKp$gsiVc1cJoxa5)q+nwdmQnauB4DMLOG;Thzor=Higo5TI}287q^+t~Hva@yyks z^fBgd>;!sacAQWoamw#E-*Fjko>;FWN~~ zQE-VQdDz4?^ui>bU3WC=;!CN*`S4G!{kz}3;5y}wB>z+6hN+y|d>vmS0!r~fX$vT2 z5gyA5ztl9*&c!blt8bwW4SnEFGNGTwXqJHLSk|zoNzIV>k~EXKPDpAYYsm*%&J5hn z6+@lAk4tP*tbp*W8azvua8=dm}I zkz4Cc7fr~0@a*45*9u%53JjwcyLk^HiqWSZw;N}4g3glG&CUt@}ilz^=RFfZs=wG zRkY%#>A4eBo%UO_;hMO@bi?k+3F^h#esLkXtYC3JHFBW9+w9!*@I}tRh|fi{<>M^p z*F;|x#^aAZ#~eJu7xXRQVPJlvmAqr4cZ0n&Ch3WdDbLs6cGh_3Jx0$u-2Qhp!nDTA zx{t#oFq_>wQ+7K>%RfnSHS9^XP)q}BOp>JceEPG+oZ&7*F)__UXJiY9+&xpYD69A& z^Q&hDH@Zot8V-(3idS;(YZ(;XQZi&XL9L?S^m)2T9e#+oI5nJaOhxbIZ_hkV-8)X* z488Dnb?EliM<2f)aamnWOzZ#dS(v#Ta&x_!Skej@6J&%&zacm)W^|>YxCYV*6%D%j!-J| zjrBDx2mhY2G`JsDie0R&Te^8(941jG%Qs$dk9ZGX_cm?y#)9;jnaR092;{dz7q1}i z7?TjE&-kuw-n)2ua)cIlKVRGXdpo7~5I&und`8yLNtVjlhUj=w>KQ)fingRjH})F! 
z(T~Mv4EMbU;E^__CymuvjjLykyhQqHUeyOn@%C5?joUAqMEfmqvA7;xR1o4wH3nZ0 zxv-ubj9yTgIXyf&RVMNomk@60t>1| z7k=x}Dx5Cvu#YjukL2A^mvk>qg8$D<c`qq1JjN;b!#_@#abK2 z8b>kNuj)R2aj@%rDze)s?UB<^PdqmRf1KRhtwFon2Mt_I95=GmpDTOLi`4(XnnnLS z-fXyNSY2*JH;;%j)VN2CzW1)XICV+7;_Y}5-KcVMhOX2`?_Z|3$&Sy9iudTzpS!ps z@9~?;s{L&+&XzemO`@a2w-p^*Pu+8{R3G@hsMLirJxwz3m^F3M)*4CU?(0O3lnq64 ztJh|F$ml%UF)TtZlnDP;owe1q{~<>k{vck3%*^*)2pYOO4m?71{L zme}9k)6aQj<=L}D?P6+@5*J+GH016zBjgNfD}*|db)ud-{ScL@{y3bu(>!DU{Bs|F zu2Gxv&1iwvfBJlbe@Mt|4hr2m7EFsw4^VoYq_Yj}G@n=B-;?clb91~{@I{S=K<*C9 zh|-G&2NgUov`_Be(h0etnxcm*D z_}(Pd;%05(=f9J34})yhP4e6s`}J(17rr*ulve*RpiplP{v-GCO|gU2plI}<%eU^n z%zqI}*;XVp-Mpy+f`ryfW*5z-OL5p(H1`it0y1LV%Vztl&+OMW&y}oNr4uH+kRyXd zo3oh*VYCUt4=qzk`FE1p#!<U&j5bVV|LS>#W$YfOFL7M zc5Mp=3DxvmOi!-AEXz2Z^<1LmctV4i@Orz!kLbw6iQqd_E-;s8t$vuR5=NZeK-tE; zkN^Ai-$T#J%!}&c#1rr7yX`x(90NWrWYIVVRsU{3WOwhMT{nCbxL5F!R``&l?OPzu zu8PKU=G?g`o&oWb_QRPliqj}YH4By~6T6+6?BGq7r~$9JaPjURGq^WtSclB|KQ#P$ z-G6d)5Js6riaxzxymqh?M^W=^@*vn@EsWLLTS^|2NSZ2`RSlERf%VP<$sn~Wz}$Zz zDU%s^^kwf>F@xGwZbfs3bXhVY>6*K09l@;qI499+GE$LZ456gNs+p0&46#zAtBmQg zY(&yJA*^Z&QjrGGB)RO@uP7C15gcQ^^72_n2y6dMw|F(e6nuGc2QF6ZzUnUzrI!8F zRHY)r8A4;O@~P>BvZ|>|MIu3#Of@q%nBj#q=_-4=tTvJKID^{XP}Y7;smPxUp%2Mb zGfRURQlv@Q?x)LI5=mQREU3DLv8w4wMJ_RfJ`Y0XILKti1cm36$z%>vsAh`qiK@b5 z$>700&+n#x!6L$D3T8Evkdh!^z;nB$`s{tMA2T#z$}`tM35TICFxXG{R*0AsDJetx z7a}6;*kINbDJcmy20R@i)o1WjG6QJ3m;Qy42zv;co=Qmw1xtxA;Qf_gJdu!Q#CyU> zMXdTPGT1L%iZq5X{Rsjinqq_filB)p{mcF57U^74>D)xv0rbQl zLs-8mN=cY9;K`Gy_9h1VjXy&LPlGL%^e;mHFJvi%zWu+QrO@nKSyu_KV0To~p0(ZeTMr&);O| z5k+y-`CZ+&k?QoC3)(cK33m>KNtaA^-GkMGdg~97=eAqBnCbSJjPN_pM~TK9TP3%O z2-`M#u*Fo9*P25zsJ7Sw)|zwq@A#_B{Ek=_=c^{)8avmc>mdmJx#oEesRFxJ7FP^! zS7IF{u`~Io*Szzr(z;8smF0HNha1s=ca}mo613Ulh3z3kF%r~STRgj8Gzke1j(_5t zM#3TWKx|HZmTuVf%FN0`+M^|!BErfiZkRK)YIvjC)-*`8vPco3^Y=ZB28yozKhqzd zHWA|4dAis(Q6IJCK)wPO>U#1?Il)n16>d|6`0~kC<^m!d$1#ijP@ z#6_>yXQ_^-Wo(F<1xVl{cys z(FIK=@ePD}MFAvLXP-Bk$gYH6(R|)Ex3B0rjOi}pfc1YaFX52D*2-1b)~b!9&R|p; z-}cUg>+Duaq3>Frh|gL#Ax;q;4_(y{_W)n&#HSd7e|0E}DI#aWmu>Va@7m7GH2WsG zA#knTm~pDAE%9u3yUTrb^(L?{e3E;!L`AE6)ntQTVezu555^vM8Ij z%JV1d>`m!nEE)a~tnfHO4ClS;xEn6-m~0h&wdxGIl&Nk1ML(GBNiAqvYf0I?vaxd^ zHG6bF!ME0FonX>xSZQ{{G3m{~`b|3xU=P^e+I?rXc?P6*lA$ z6pHO77a)9*(LwQ^H$=}M*jkhqqG?6#2m$^=qo^AM6^kxGnC;s;5I!HFkBeYwq=WGD zh$K9Sdw&Lzw6x+^A${Q48SmCFC$QWN5w^(Z5ciIBzyeT%;8&y_#QY*J<8P4_5Uz>z zYGB;@aOq8gJ2*mEdx(K72OSq*FLM^$ArC;b4I(0u)c6P%6oh@VOh`ha#VG_@yGmdK z{J0LFssYgF1h9P{pj{7Ol^$U176iA?$HIVFAWLFFg+&U3yBK^eUV)(eV$O#oN9Ypg zJ6lRGX2dtM3!c#bEA2!EJX|%pb|1Tg{i;qj9(PPXxjeP77H?>%Pdbj2l(xy&;A@zX zY1d3_Snq~*vHZS8De?T)`-{!v>E|5MpNx{u&g+*3JZ6-V4x^k=hsm2lo71Nndw$61 zx7caRnXwQA+uwl?kh2Iy73aq&xqNR3&lfv|S<%_$-=Q{N=YZxKC4du&7kw8VF!6iR znkz!)%`PiKtDVfy(0T!oJ=bfm=Ekui#C8s0t7Tc^yJj{*-^kXj0zk7>=ZuW|DP$Iy zw~0XC9tctHFCqfiU;;ow*lhoF`)`{}&r63cRyO;@o;6^NcMoHEIqBFiMXR7Z540rrURoFk7X1wz z1k3rOKk8-0N8pDht|Urs#KF0*VH!f1YrG%~x~322tY-T@xHSO#tfm@#J~`%v>FTTn zr;FVa(>yH)J<4pnVa3mfO|OhQPs2iNRDc~Xl}m2O2f!J|z)QJDGjLmZ!pAgi`8G#v zxWI%OuGPB!iqO&l&mIkj?G?9hGKlsKCp)?1Xs;mJLc0Qy#v6CimbLmQ(tEjvG2Fsk zZ@fnuw@k&S>8BHb&^r>h@Yn)uM4LRVFVZ+`>dUg$stWW^*bMuke*pVb!+I^kEMDbr zV8t>^uYJLjDx&pJ2vpX}B8QS4%^pp@8l_wKi}f3bw!Dj*h_*V&fZ-{IG@OE`5R(0P1Uqw!e)8h&HM+uuh=fkv8>}e2->#0~e}l_a$rP zAj4pIfgP%HVK|p@ygbc_6`?n0SKY#ok$y;HuVSb|b1$dgc>t=hy)Y1Mi3c#kZb(!! 
z?mh=c!z6m`|0R@yLnb1iYzKTHe~t@%?*t%?3l$!tN`_OFBt#a3_h>Bn2;IWV5#ZGL zM}MSo8VAhAsTW8#Qv(=Z`oiW#F`knD1Xc14b|>dQuj^&CIH0140xVVTK(7f%j~PCP zLExidwxvC=5N&P{=iTO(Er}{I@CS{X5Wha0!?8!R1~;TOq!lzGqtlj0cbTBE@h&uO zf-lkeQ&PHO;0g{Yj08*Lp`^$T@g5EJBYe=h4kPz}3SO+hLE+36+@lFgfd#<91y|^K zGJ3-~2oY_8PA~>Z8>^=xKVY~_tiAScwIG~cfb$NNrhSmca^@g>d~1(JK@O&(S<|AI zmR=?eu>%t+ zfNnOlK}h3)Q0Qi!3X`=O50-T|L6p+{X8QSM@|S@9s(jhZsxIxiwhh!fCtU;{xXXbL zPD;?oLgNcR!9>xaW@X{W8r#n0AhaOMr7S+A##Ch7MtO4M6VJ z+x?2`X5Ns2)i%r-F6C}@#8oy@59eXH5@tG;5GL60IUGjnpCB1H0|uT#HcG`d%!0xp z>wBjY94H;b4Ya~US+lGX(lgpBlweLK#9B>z>;$ZZ`t7>D06qO^F0u& zOfuWFxsGT%@b)HM_kRJsew@R?&Wr8Qi0oYl@St)FzvBwa;#uD3bI_Lt9CY*?TKORzIeD?Z=;} ze_1XIfIONqX(NCQB-EwD_lq)V8%dNo0&-e(A|Rcmj7c7JD!heUioe#6fdg*_;)Eep z;ywbh#B|>*PMF2Vg4p6TBu&H-3I|jy!xps)fK0IQP)}%x4Fxd{L1@*uxRfYo*h{YX z3wc;poHE5- zTh;Sx1Uf}#T2IPZGv^ta#jErq+QOKYHfiC*_DloL3~rk@rRUK0OcC!z?I+{f-_ppD zGb*>I&f?YI_McQtf9|8@rdj+8?FXZN-n2I=ez%y~<-|GroT6FLl3jAj)#&)? z+n ztLFv8pRYZ)RC<*trO3rs;BUN2Ti1`l?JIc>!peF7nU!l#<_w3_rk z=o~Tvb0yLC!pF3trFH9u%m-nqjmk$Oq|8pl3U_XFKUMuGQopo5KaWg0qN9w9dh~#P zW?wwwL}m?e}7UF<68 z8n{W0jhpmjSw)hYxX-_fcXII3pVroED9cy%t6Li${oLF(gU>$as=`+K&$H$Hs2W!FTu_=_)@EpXEXz=Qex_k`{!@zB{4A5-2J1x~L+YCC{CmAEd=8hgbT{mM&^_C4&UC-njTuWJHL+|i*qU-L+z!fZoSt|I(Axq#cqBi=(iG% zVa&c{^Xt^`T9f~eqbm=D>ignFVp0jAvPJf_k}b=~o-EmA{Yr}LOU6D4A=}uqr7YR^ z-C#<@5X!!fWsq&``XthFO2f5 zjFy#Oiph=}!^vHe3Sb>zKzGfFn5adp#(Pd!CU7(kN^FOILhmh>H9o=J^?jWXutS2! z>w2(E<~=^UP`=R|f*Mm(zs$wONJmExi--XRl2G?!C%A&lr1#_M^85Rvos13TDy(%qvGhfbqW7Tv8(EU ztg-W2RY{NnK{`Z8kcax25xcsja#vNh!VVA0b}EPJS=}Y|Pk@G8VdpNW=%IQxa?5E) zLV@5GA~YEz;lsw!Bd?q;i$dAzzr{y{2r-j!#d5uPb^|4wfU>O|qw(S6=y~`>>yt*; zeU}=n4eXf_nffQ{swh9vuKR@Vz-G*09B;7D!pvn*_kg44YL31T7dghc4cjo2y?#eI^MdE5zaR zEHSS!pPS3&i8@;fs#%1?$MW`JYY0AwL%o5Chf7COedZ%d_CAM?t>VtF5PT*um81Ey z!5VotzS!hhPg{lio?fEaR(oQ?*)eFJcT5WRy|6f+42!6qYz46kSw;;o>U3^E62T0lOBUov2A%} z+^LoR>4nbqL-rO)#Kg{lG44P8_8ABlr-xFZugqQvk~R zi7Z`^Jgw_AW`3Xl=^$w_QrM(ukFt<8l%EvUmKYKCO~T1n8UGx@GyJ^~;>luuhYKC$wS)pb36I`&51Iq`C z%WOPGUYobJ@h9wWn$-xsnRP>_4;Fc$AM|?hHYe$Hpg*Y)%`+(J>xADizO&=;o3noS z3DTe~T=v;HS*Cq^R1a}c0oYh1^$R8kGUp!Bw z-p1i@cDG41(HiaDcvu7$cDB1DR&=IwmW7Z+_F6!@7jBzDVW|tf{Er2o-7;uW`ORZ> z&G6WJXjO>v#$N0A37iI=J^}rO>Ryh{W3uFBpdY=kGp(h>h6}Y2v`>Uf_p$U3Ld^zm z8yld=nNhW24fR1Ze!#!Yd~~&6_x<)z)^%R(BjUd~huHYrN3 z$DA@A;8G6h11DR9K z(YcQ#A(9}GHYis}qgBGwZL>-oS(7XnFkotl#+j0qREw7z8AdLt4U{m{YgZdhY~_{h zR4q0Mcr2|IY1gh6MW=q={qSVS5GkT(oZ}Hv_A;`r$uyeyrdDutc$j~wu-NQ&yXhg5 zl|##JZRvh&a<#;mW!Hr3zLP^v2i&CP6-&KDpig=2u&QT_g3 zb~ohpmyK=9`fzvkmXR*aA{7Id9cKBf%T}i6MqXOI|C^Te*U4%75pqq;>Opm#Klj+g zYcE@e-=EV)W>eBvP3r~!yK>Xy@#xnB6^BWbUy~Pwmzk2or$3>V=+A4Pr%Tpc|4S)b zXjEy^?nzmFxHuBcHauG}{I(mrPvk6c;&8Jpxz#d1k5q+9Z|+3ms$zbP zePw>LVYX&BN!i47)!&+Rm%z9-w&+p1pekG!gS2Vhr==I&S&d6Qo{We07B^_qAOP{VjRmt2H}p55)qTqf{#daNo8s~t3D1jAawurFa)XBgHCh7E*a-@~vmFl;gm z`w52q3d5GcuyrtOD-8P+hV6r4M}nqiVAv%XmH@*Zz_27(1bIk1x#Bz-$M$)`dyjW0 z??>uWx48nZ(Wy~I@P@QA++L>UaJ$&$qoh8?!9}`87e*DK7ShhGI8V>v_5tM`D%I8) z>PxrG#Nif&@>W$hzr)o^OSk+s0};|LcYB$g!wrSLJ`S0jxQ?i(1hUcP+@hh#h0J6 zZ@}E<$_m=*JiEwoeD(Grdk9|X?Nlt#;ow3ls>_F>h?h8y85MU#Lm+nlC>f-RPWgjN zVgnf^Jo3?P{(CixBuoLcqd{6+J+V4h1}z3l8LlB?POPe=<@@;8pqo)AENjS~cZIf=Cmq)&zL=!M;xH#-}QuO3gts(O|J6`EezlFq4mj&H+%CpbWI&$;&Nh+sRQoT0s6(Q=*7=Wk!kPJ1BB`&ti(eN z{4Jd6{eKTFX}_B)JJHZbFHGYP2&ZBbF04hK4b7Z63)y8F`ErBzA7b!1L|fZSB{bWI z)W*b(cZ{j2==uoV-|@_h)*|n?(<-IJoS@35)n-z$HX9E^2ld& z)D&IF4r9GEPvqO3C8P6;y~5<(>1LFPlJ(d4T%5U|&P?fjZf>kq@bQQY+9?w?3gqqy z+F2SQU)%_u%(LtI@VVj1>-LSPM)5}g@eWW>019eA!2~Go0t#C|F`(e-?DTXdhcR;U zrblt`4&nd0CxDoOtdW5(o|~*uToLGp0DTzHrw97HK>sPw&->@Xm*&$C`z&40=f0;$ 
zfKT@?&4T~R?}Da2{5hVA51LAYVY7qGI()Cb7=Jnw*$zJEfL?bVCS2%OEYjV{=x)!i z<6U;aI1ih{t~$0x%1_jhn0PzCknau;2zyL#qW)s&Aepp<(=hN-*p6jC;-UD_Dc%I? zDHwq=Z$j1O(0&v6CBNRZ{Gb}+`)HqFC3>~+Zxw_;j6O_(iiF0dAxz9PB|-L$_q7^Xswz3I^88aJn6oEN!fwQ$<*ZwBJ{Jev6|r8g)ZWz2pwp5-r^+RYZ`9VOBK? z^^pqim@BFh#l>YrM<+HcP;6>zSX@%3Z<}|0=gZpl`>&>)!y|X@-`!=t!b^clN z&hlD=I+S01_G41dIvIoCpx=3AD^V;msmyTdlLw*IqHogA&k;pI1Kr z$oOG@UT>;d`$NQ`|6235i^;O6T=tVSmr9Y3roWr+9x<(DAB~noI*g3A;NB!S+kej< z$%7+HPG1~KOz+SHdkPM`H*rdM{nto4wZzQU-u~=?TjiTtY){k3H5&`#dB3%$UpXWD zqX-kp^%IT5#ios>=v}J{=d}GVA^~+_&bwXwO~Utb-kAB({Bd$HoNki4t685jp5K(& zf_R1q^c!t5TYWJdkQ4f*e|)>CC5!v-f#u`T^(WDP)@2$W*Fzi5O=o5~`ZQe511w{K zZN7Iq3S=7AZM9SnDbk`2F*R(fx<&}Q)!V(+Kc1xhpkFqN-85L6`R~>X0-(Vb3aRcK}&>{PLu9pm3$W_Q!w=js-=y zhi)x9a$tDFZRzYV%zbM1I|Rl~DJYfJHmG+RaEoKwX{ zITDt&f~P*i{6?L{&Oy%hZ;z0T6!k4#>Q1ez6*qj3cQ9ouwAo0?n-&Z<48}~r<#YXZA?!4jXG*>ls^*$7eg_A>C zWojctMpe;0CIo*QhJ8#IpTouV>aqG&`mRhK4ry&g_fH{sg-RAFM7GJ*3>iHk%Dzng zWb);!3_U^V%7t%P1e4E}`WU)cg_5BqZN{E}O}e~u?sX=FbQyD@!`@7hM)IGFoP-d!#&DVE<|6ZbH&P-Slz!TB5)y-^?k(J>Mf52UKL2aA2dy!;x-!Kz z?i9|K-No1Fa?ZstcV$Xya8Cu1H`1$NLuA}wHxFCfD@7ecQPqYj;g=_T0{FkgdMYIgZ@h@8Yk~aPh&m|NAHK0pasM zK?n%5|Af0hxE&0H2mdht1gU?RV7+TJvLV;~-%55Yie6F-xgJi%e}h&}fiw4W2pt`b zCLhQ4>_6dYcF3Mt8Xt#+Ha&MPJ(^w`RFD)aO9;(9b7{C}OiRtS|<&zj*@CBlg)LFEoK*3IqcQ zEn$vA?p$a{K~9+I7IdVlLPI3wsKqIUvLCyOnhKS(O(jh{ws&pW?&P8^yZ7N0?)?M4 z=vVhba~r(hvlO7t-V~Y2X?VzLdOlT&Bn?u!bl-A3dZpWwTCGRwf`0rW&v{Otc@M(j zL}xh;kw%?JX)#E2^{BTSvPZAh^h$b@AjQ@7Q?*i$r75jUw4do}^LAn3Zi@40?mj!J zX|{Hp>N}1lJ^uPDZkCZa;0WF>9TDjD%bc)D-xhlzu6Fzw<-M0Q;#^9BGpMFkqiOVx^%}er&JK5+>RnXFWM3N3R~6q#F+X3}g!e7vrZiF)W|*@VW_6d`pBwGN zXzG_|79T@4I}j(~_-GdPSWeB=^Hi)J@<2Dl>>x=|j#m2H<^6lUGHjkd-c5!q+|;p- z**>Gmg<^F`)y6V9(sd5uc#Nk@91-Wr1&QWvpsb1k^>2eXA|_J8#{ztOro|#N0Cm5E@{ZL*fGYh*Y6UFx z19hZ0sOhJ|&phLTet-r^YWyctziiSmw1btE2%1b#XIj;!ZQSjaPI zDkl<;K>^;DbQeS@&=}j#Zsj znF})B<2;7v0sH3xljMrZj>{mh5CqH(?sFbP1Au7}(Dv6y;Bl|%vCsv;L=R|VuXp9d zs*?omm3Kt#l;YI6p!t#7>v76En-N^WoP!C`?8o=DSmD_`(CZxt_k^@dNNZDKQ+zo) zF6?&;8g&xk)mV5ob*_2UDr@D9#woj?l)J#y(X1CAoz7CcJ)VEkgnZXeXaWO&vW!9j zLu(MY>Ezd7gC3%E?jOfoYF#vZ_{%CM<_}4ylwvOA&)C$lX$Ql9ogc2=;OFysf%|#m z`>Y5u6J zL}2FTA>`Xdge$!*f-(vqwx*b6>`1jAxK@N9nONuDDV@7{&^3vXU5`O_w7>h@eJ}C1 zUB2czL;pMPkHqu(l5KF%9jNyu@3> zLOGs}`gxjJ-8XGz4D`AwX8QT8Hb2cj%ZV>koC7;7t$Zq+kH#%(;4<$S%6poYRrypG zu6BDiPP|j$DNoEwT&3qpd%bt0Q7G;GNL3gkuCWm#YCFU)^Q3U{wm=TytRH_u_;Pb! z2XjZV!5p45_fywB=gx-a%@+fs;#$N}vPe`WqeGxVA{l~{YbAy2zoyE3d4|{zc5s0{Ty_dz5$PpHBJDpd~<<_j+vzRDn+gpF{ znZ#Twb^g#a@hqmSSX^b(MW~Ze*5$*QGA~VQ#o&V%FYj;@W*eiv{D>6X7OZ>4wO1jS zED8U0V+mpMSo33ioPk$u{L~wD)txh?ovm-!uXFkEBsJfw646sfb+W5ouLl46+`%)& zh)YD@993gOYrncRTHYv2%FNQ#X>Gz{h+V&?CZaey`u0jxxcKxg4E~oPLECuuwYkW` z`{>UfnWZ?dO1j^c^o;s2`!i-ObZXY^W(c#KNU-aVPTOnF#D>dlu(v(8GcEuj z;%!Jm!g&}k?*%$-?Mqx488_4)NK2#a+~3{)gXembjg26~zj)M!g#`oLMS!~saBKjl zpe`jHfpT@{r<|Y zXy=@ZQb_L-qU*CBgU6Y2Wz0PNAzn zStvoPEO4Fj9U!0u1abceE&u|)e*_>ntkeKv{tp78By$r${+w&92Ek*!ijrXkOxx!g zcmLt80bB^csf7UC9f0Ei;n0QwDosG;0m8_79YAvbK`8Kjmm+u&Cf^3%mE#-UGE-P# zgE;0gc#kwQdW>Z$j{gd z4v_MDNPSSkQJ!E5Cm^5gK?&Kma;NlgS-GDJBgh zYgP92sHlZgP{5dooz`1H7rQSi84DRouqPIm$^KVZ_g~(+xVcTa{G4#3YHamETCg!+ zEwOIg0g>@_i2pRG#W|QOk8oC+d_s^*Jjut^KWJe2{9sB7JMdRMWL-{lWZ+02*@$h> zW;`?dRHfrtGmY|AlX&k{aq0!;^-$P@? z*yh)pv$Q3iM-TdDwqI-I4QB8s_gPV5t!>_$G?f%`AOb{wJx^QVK{)(rACOyo$70!- z%bndRB0W#gdZL5LbMN2fUT-NTJoR-!TW@+GBL)!Lw5QeiR)$~>6g%BzUPufi$5kF? 
zjjF``Ff#j<@V<+@`IRYW&JTPL+Z|!uFIxD5;sD`4Z=Q7jIW;qRPCb)o1224i(|@i& z_Qgvvzp3xc-M{COHns(0_+WB`Nnw}3ua|fA*CSw?e2;DG#}d#ndhwfCm6vHU_-WuR z!0vM}^9$6QU`u6|IZI-ss8Xm7sy=<&yP z{mOlbDRW`S3k1uC}{w+FY0EmtXu7_<)ex=4z)cbXk81 zPv-uPmJUl5VHct-p~%U?p{@pJ|Ih@BqhWWCFG#-z)1_KaZl}b*;Nntzj=#Xm%|T1w zh2>wh&URMhV%Wc{E6Gvy!FNniW#~t?a5mdP73Zq8=(viUBMJz*Ub-|6<>9_21r|d; zZt-Qa8L>NYN~~IEOaak~alfM2j{Z+j#(FlJmmw$#GV0RE@J0HLosTRCD^*UNI zkZ!*s4d`lBf&id^BT7KmND1iPt-*eY@HL?ay|h57$d%**m%SpIZXn#V+3E~{YBl|S z1?xY~PX%GZy1=1u;E<&u5X3+vI)G<9$&y@Qe83+Na|va@nfkB)2_WwO$kM_ASu(Ju z_s@^(`xWW`$np!qSd)OcR6yqZ?_i9BIH+CMrJ?ud?i5(H1{3RlfFKeyt41`91%NrV z0XQ@cI(SC~gxU&(S}@Z0T0$JZQ3RlLGYJGzjHtE-qYvP zbuN0E9cz*K^WGyZi&+!aOC`EtWa#;)|1o*0L76h0Z^GGcIK;TbFQ06}%Aph8GpiO- zy2}}kU*O)u(Dz80g_GFte^We&`Mwa7@gE2o1<5 zknNA}n>v19pOyA?nHl_|zNhQs6~()Z`BPxriA1wNyfn>Q_g!uv!{%w|5W`n!u&fY! zqZp!fZlibyjyqzfOSqAqC_9vazfL;En?wCpix{zEQR%?y`f2Z5ujlxEHp9(-%N0(k z;dYrg1@ENSQZ+?dxHauw**up-=)+^AA9C==#%!CAC}kT+l=&D_2*Crsft;$h65Z;T z{)%Q)JtiihHmf~laA87!r@lBSiyUao1RX>r9K{|G`XpP<3=!L+I~YF$4Sz?)=7ggx zF=!M#@I*x1JA*q}>1&J!h>vso9o97tLa|e+GPtMv$kru@KcW&_$*=tfQpLQbJl=G8 zdb}yHjd^P`Y14f~RxL|yeq~2Cu3+aT2G*}Ju+Eg)1E5#SWOB56LQa{4%y z+~-?tC_}>W_S;6OuQJFEvlg3mcq9a##7NlvOQ%q_2V&+ScZL-Eksx`i{l&ezk#8CY zpxBAj?kQp!#C=&yUGsOtB`A9J$SMU7amcs~Rg+|h$o4)161cV>%X~v-O8O8QVJ2Ro z`T75w$z?yGVy(Kq`Dlq4N3#V+RUfbZsZwanqt{<&sw%{*yXv4jYr4WkC@>)aN^Cnc zprlxQMvVPc-GhW?PoUs^c#g~4TTmtz!ZeZ~A>%a;j>bk3{Ht1K2QjqOQ|!@1bOR|% zd@9Vv`&3e_h<Y0^!AN1-a5B3>j!sKD2$CmC{a}V)jQLRFrl3v z7jkTVRe}cx=JVBLbx!FMez}PZL#lAARfvt&$0Y+=IAg!XI9H00CNbZIdY1eB;00&k zkp-uOJLz^iU!W{kQvRnANPii*qyr_;Y~6bp3teov*Ll+KTT5DdeFA=bSMw+-UpDE{ zZys(59dpDYht0oH9O3RhU63W6UH~^vhWm|Kp_SJ}(T}OeFTp-t4%jo9@1*xZ!sLdl z=u41=7I=(dzlhXdPtu9{@PVC{`R2mIefBiE60qF*iG=g+K-Sy1f>k6_6tw2V`QcP< zSVo0-=ujB5=$c8y%^Hv>3y2W%8#4N9Rf_uj6`g(UxHD!`UuFZ!*-OO!wH~3yKTsl6 zJc`^Ygp@eVYak_qvn-txT<7o}<{_z*0!&brcX~|SA?+_vfayuW9FF@4C&?|+QA@=9 znO!UX2@!umWEo=+O%LQhd<=YoDkNM@Uj4&!-)xUImtPZ&%OUh_ zXfh)5XeJOGnD0nT^6K-MvK%mplSaX$4MB=fKZ9>c>3mG%Wx%_=bw;@AD(};3ojo%*VlUj4-8x-mizBRK1UHAi_q{% zIrRxtXn&Qr4B1rwkcjzoirlw6C-NO?elTYK3mQH>EnLGa7DI?tVpiA@a{mMa@Q039 zc?tiH#SxO701d+w%DzGG`-YLyOVDxQA(&uDk*88;*1|g5Mx8`>z!O7K<3(t9$kHPR z@&^w}>>5KiGe#krVdUx&4bu?v$p9;7-xi?z3nw2%uHL?z@*CNg37vt%@c*MgAtr{n zL?^iyP=vU&zdWhevbUgkoTU%?Jc86+MmGIN8Wrq{Yrpx4P#Qz*|E+fgD$9m4Z$ack zM-X(Iq^YM!^kAkSboos%8T)D(UtLt_a9M*Z&_!KT0xN|b0LpZ_xNA5=mO9!LOZulRp8H^>rcC4A-RUva-j+ zi8SIgtk;;6{w5x|+?D?bot_jNLY^w5dc;Ch=H2%1rQsYz#@uZ=m#FZcEKguqm4}OzdXxK0DG7k{*WO zB_Gy!jJEIXfAGHVV)e24ObQ7J5<}f=|BCucZXG-~o*Li1^5)*{{2h2(MweV^!jVgt zE_8+JX*twz3!;{~=;rm87;^PO0T zlko$IW7)=~JW)u3wDFkqe^18|6uXP-sEzfT2`mKb;<2aYDGxLSo!3mPJL|eGlW{(zADCH*ou`_r>?*cjCMq~f zpiKodgYBm-bGzUqI5bV)Np%F(Tn~Scu~=%Q|uK`4t8a z_h;k&9ZC_GG2Ci|mNMm=n4}ze`CMgVl1fD2hCcVwp-bWzhl^maPMPt0<&yXXiC;$V zPx?>`%Tb1O-Ir5@ZB7J}v;2A@bZzkMLHX`Old*X z!%d#pKmuYh)QJgyPgq;fCPmEFe9_aExP}wd^i2y>-L-z0`BVI6JoEi5PJG+Mo6Gt6 z;mD~g6ROXq75d_v-dW}m&h3bBE#u53aOR2x%gjz+#9OGT zPgCM8!uzpk+AefFs)@b}orr2;=t3u>no>|rA5l%wK5iJJc_-E-mts#id8;Jc{Zs71h5gsxK+3^C+pmR8s${q`pLs=TTOF zc>(`bS$*jOo<~LfB?bPgiuw`-o<~*vrKSB z-xC!4+K#pCFxTrYY^2nQD)?m|7Tf9gL)f1QH@}`*tcCzwj9`SORC(Q)0LE)N+uRMEga4kaqv#8 zjldXspyKz3#~QIY>J)Tvx*IwjP_QQK>MuP@=O;F+cIHf;^3vd9;FRUqbQQzlA5v~v z1$xm%z^PE9Q0x))-enuAVf z1Ot;5N6T(`#^rny%R$l~B~+j{Pd+J}qJOjz`ovG^Hav<$rZdZ>*f(|uagR>352%A8 za^9x&^-_?wN-EGjW}P`_HXhapW5(1!t)`F1rfX9af46!Wk7defGkRwWY~}ZrQUrcE zPns%7{?^e(2oyHDwqa#O=Y?47d}4=-0Y`3fM$QfgqxC`@F4)>ZmLB&e4qvX5&5X8^ z{nE4)*t)c?9nbky;9Ee2%;PoY*gCJ9>)HZOe};Gk6|(*o@pj>>cxtZn-<-Zhkjg{9 zbJLi;^Njn!(~morl2s>jgaB54iK!+Mx{OZn><SEiX^(3>1>vbBJm^CK>3+Ohw7pZ)ja 
zCou3wv)vGjmRi?-Ez9C&_}c%97w?K6XPL$cIsI>m8egl~9re#HwU5bgLDP=|Q^Skn zA*!*=>#i5w3@@!ypcB%Z70r{~26*t3G1D!F8 zSAsy#$YjidiF&?hXfeJ*MnCIC5c={rf|lPUbG?f8eO{q zfRkM7W&ID^4GYV`xUHrpkYx>)Qp&^U5*1Dz|3mKb1mP~B#VGHTX)Nux5jcy15Vv^= z9T^WiD$woEk^8%I;XltTqi-MOfJcrF)`;K~5Dlf*ROm=Q1~{C4hZ6a zT0#@@0uZ(sgADq#Gl{*72c00#IxU@!YpbXN zeJlr}a(oJAs4WmX~%;cx*YTGMC)Z`r*psmyn{=Zp5@vMSJd z9<;`Ju|1d2W)h5UhNcIRV4ms+q3($VL90#Y+!6!68D?^j;mT;tE$(X0N?9YaEELR> zM)rUWFG^seEUyB+zX^1*jKRhLt=`ok-~3L4tbXFIdB3J`)la!YhM+=EPTg=boFIa@ zmUaM(rRAW?3k;wMd60)Uv3&*bR9jqd9W`w?v`5feBm8C_nI|12U-PPWxd7TWn<~)c zEufF&l%O+FpI?%tfG~%(2vGYkNNuU}fZhyKStHc?ZLKqBvx@bmgjNIT^Cye;g8oQiQp-(p~XpJ`$LqKkM>qow=_iWxS zZ^MfZW2n(Rj>7Y=mzFDJc!m|sZ3p}+WWFX{Gfwroz8=$f22waL?SX`3ibIentEe?f zMlGshZmaV88{y+=ad>eFU+{G0^I%%zXO-0zGDgjJ<^=>>DZDy8e>Rda$x{m%`Zj-K z$~vFi8qv_*eK}~lvP$im@v}y4fFj5}8ELpOFW}I8-m7zO#)6^`bm<}udk{G8zq}dTl8#ls z6F%bPfk(KDUY%sg9A#DCOo7s#6G+eg1#;}gb)*HhPV3+H@0kd?!q;PJca+R+OXY!+ zvsK~6LPu}LSRKx&=LM*5UP9YAX#>~ltbuFOs_ll1s}{6lfo$^vYfrCwbv}#%imXTm zuhB+!P~~qhW|8s=#3ZHv^YX{SGS%5hpyv0znyeFsrpCN&=;l0uq1P(O-IDel`Pa)v zDM8Q?hSrFH8GozU0FV>w*v1uNsN9^X>1M%)>=W<0JAj#7>us3`#cuZNB) z1mTF6SSG(@f9q`{{2T>+UKj+^5noh8H&5;*wBI(}ynx?hklZWE4$^t)L7x1Wpde7^ zDzE&89R!4loNWY4*7ptFmS>N~riYxviyc>;tr0Fqz`Ue9&>8sFRI zI>?1p&D>V3i5l&9>H+#PcpFS)2*6kp4<<5JFvMnBK-%(4ng*e*-*mH6Bc~SvMb@1I zuK+A0XN{n~F)v_t*K};Uts0opwE~&h-l8B|9rNtfEG0XLAdR5JMG>DXZb<>T3|{q~ zQ~>U)4HjG406q@ZP(94Rvqg}~`>|B>S&%5(`Lbp(=#8$>dL6FNwj0(pfz?XCJUDqA zZh^V)lo99~8-Nwpy)_CFILlhSo$J1}1iZDXyCLXWTY=U}0=|E}*+v*Zf_|kZgibq2 z1HL*{FpbrLG-xNu8L(3eyIazRQ0X~@mM8rYx&kc+M*H}ugf_xK?IUyBQwb`x%u{N& zB-W2$SAzSpUQ{f*_qTo#7uU5BVglbabhnlXuM^Tofqo?!Sc|dB zE6@6X{OjyjAHQv=1mzRKJXLExHl6AY<|!YgvCN&w@M7E*1~0@xL3r_UTgY?(24vx9 z9)d&4v=q_cTbWUTezKr!-m+H=R$ga7Ac9^A3agm6RH=fMSCbe;A4m;9P`pEN^^kI= zfp30Q=PY!3dWwI3wZAlE`fTS*crnI^-V5OnA6{Hfphlne>OGsa2R_wHN!iv2Q?F}ohQ(JxKMgjOf_!Yo0`QJSVJT`iLo;0@1L9pE=*rW4~|HI7e1EW{P(XGBzc>mefj^({Kwx!0Q@b=fWM^& zNWoy>Ph3P>BPLYkfuzd*olp}kNaAq?ueGehvbGzR${$#K?so&i7nyZ!2O?M}tU0`$ zwm+LuG9Nxkc{goOt_u1{9thsnClSpex&>ZcSmzV|Y?i4)WVpV+3JxC*w6ZxtQ7aX?~jRxVoNt5r>C3Hq9zF*_0u`E+~EG> zd2Nw4hLGCWqx|5ms*7MLqCa|^h9l?e1?Ik0QMuovD@KhH9S> z_OeFrliH%fg^Xii-=14NAphu6C`s%XXjhn1jyji5Q(gDc)gd;d*^E`;8>2 z+uUx6^e4EF9;E!NA}DheV&|ErPF!6{p2?kc@J)kMdX6Jgtqh0@UPG-8B0O&+x~~1U zx!t0U=Di;F`{m1nETzs_HFwRkwm9Xfqg}h}GTuFtY8IMD_D8r*M{KnXKAWVbuV_d@ zXOMEzy~AoY<7Cc^Ux_2VCS$FCEx!rt!`;!7m{^Q{Zv@5WJ6}rF* zk+8y8SYZ~dupCy1ffe?{3URQ)O;{lW!(ODqUZuh^QeoMsusm<;J1$68UHQGoeD8a- zV+d+J1mzftS`S4zzDKRUM>&R}*27Sa;i&a+lw$;H9p)T7d7WdNVM|hz_!|{K8y`&@ zpGg~EN*muo8{bPCKTR9ILHlsMKYG&YySeOxTV?_`X9v?bEaIqnb+wT8*GFn9t!x75 zZF277e#-kRwYcHHb<(h}AHyC#f%GfkohLM_`(R5~D6ISFCCAx|$6V(X+rmK3IUPOQ zcCI}<`7Yn?^g#gUy)9Mkw=Mb;o3R{Z2}ZA2Df&Yz&CL%MB5F8z%(a+FS4fBtT2Wdt ztgp3W-jG2aBM^~%XboOw46SV^e0}`A($WB8E}<=M{R96!6w=4qgl{E{$}{Z4Tm5vQ z93@w^Yn1$ACU+P;b0oL!d~rr?*iFHng7biZTjvK{lcU4-vK_ae!GKM>XRJ$^Z6KyOpt!~ z1uob4ouGbT8=J+x#l||hh2!&$5xK+ zxzZB%bY7amB}k{s1<qe6ZNSi@0Cy{*AeEON zeM=UIIunaGPPljWW)F1C&;6_@SKIuTN*$DW{1b|S-P;Bk=GqYuO)AvyL8c2ixv*Ks zL>nGwjh^ASIozHqn$JX?^J^ycxSjQXK0%HSB6)*d@^^lkzp$qsv7c)C-C{Dis0SUV zWJOK34rm`+Eyd&_=l=Ql1*+Q2BJ61xa%R0$6&E0$5?y4vGBS**-^#~8=cSjS)8bvo z;wwpCqF7z*EJe6jkfk+A*R5xJPotC4*1)oXqosS@O3TnHBgJwf)P|>=!|H+|^1wqs zTH7P|wM3Rp(Xgxk(+!OU%{L#?f>)=KwB8^u!-@{iK1Q9;H&KWmeF;!is=oL4OV1?_ zctvbXGgszMnwD4e)eL?IRNw-yxxc&ngT0j%m6MAhd%{#J>DLD8IdWY>?q7$DLiT#8 zRwii&B)M|I)&jUhRI4yYfQ{nGhytk>wlYaSaPxL(lA) z{Jjy|LR!cYBNgu=GrOVng#D_zNa3cj-%gvWS{q`f=|bFA9+Mrx^s!|gKVRgL-|CeT zj2`LzA6r)$)V#S@{?(Qzd-IMIi z_uS{XzwV#OJF~N!O?KzZIdk4QC&we{7?c`0Z+;-!U(shVc80Z=9OU}-zHo_RF$136 
zm@GR5A-J%;*S7Bg?(S}37x42cAW)q)bZhw8;MZZ`l5YZ$wdT3P7t>!6v%_c{Yov5hVt87HNjf1o zACYPIu1}*z_~j_e_3YQH_U0NzIerndER75LED6)|#aQ&8{4VIy3u}*Cg8Fn+S}rT% z)JiN%$N{BoL1%gfZxZ05N!fX{BL?_gwxW0RYnK8I=b(`0msw`eG^YZ>_F?UQKn$|ZHT3{=Hh8D0HL@qXx!YpuKPcK@)2=DZqn zskKsq&NLAsdWSGop!hzh#frz5)n|snx4$^?xYgn)(x=&ak#Mx}RifAQ@Z84TO&$5* zQ20Sc&9R*M&OefV_B{0<#ys_hQ>HJzd9fx<^&}w^PQ<>Lfc+^Y;sG?_L7#VmenSZR z;2bo57H@%-pv&a|#KUIZnaiWC-N%^QyH7M_{6#;RZS6io1O5NNb2RY(4`87|=6?VO z4O;&LFZ)Kx|AZly9^c)?RUe*ErKAKt-J3d_{P#rzqY$L6MMK`(4;=x35O^_mM20j7drfO`x~u%Q z+r6-Cy+I!shgUkxEWh20;(xZR>pdgdyFytPjBv?-+RASF|IppH1~oij%lNg(XHq>i zY0iY998T#_4@HR4a@0vW9<>PTKsvvnhK6tEB|z{E;QHss0$gCLL~{2|_8wS9o=}^Q zgpFRqSAe6Tn0{<8=h-?YDDW7!4E(#c! zn2mV6wwgv|orl^R2oLXs4NO2Z7)a`-piD5pbaFV$bFePjAxd47`047kJdJkY!O4J& zDxirzFF6$4Lr>2n)CugS1A7)42Vx_a0IuT9FJ(9%9?!Pm~C;Er^W^VlBR2Sg;2z_0gQEa*1gO z+L^q3`Qn1r!S<8K$fym%tzH66`TnN3Gdkb@CA@ELK|+Ss%>FflZ(H*~kgHtBS1PTm zr@(jM2tPL`mO!RUC=d{Ro~t@?2j9Okus9ccxL?$yTx3|SCEMU#!K$`T)&G>`zomGv zvo(9Er?om}cT_Q6YiG`W;0|P63dvl~99@>!El%&o+s|g?*NP0g&4@6m9NcKy=Puru z$uD*nnYbo(sH<}vEgCi)HtD+D#T{I;nQ;{8dsP)|?;q7q*>om;DTAaHRCtIP!O!ku zrm}n$zc`0KWfi}X9r89cUrCoht(D2+qq3ng>R*|#lQsf&X^$-J)<@O_GCJv0B(C-B zwOnkI7{sPLE@Rn(Tm{#%ZdqvZCMD$xL!wo1X)lIa%BxbxZ+Q+Fyu|*Y!xO(t8heT8A}Qdi~Ik<-gA}GJ5yJYE6j*i5uwTO zm)!r6F7*q?9DZh{MSW_)cL_GS48ru5yxon>5$jVFtK`!NJ$ebO3b>0KPo2)y*TWF# z4Hto2y|>h2E$A&5djPdyl*@ha&oBmE%pdyfWiBP_r0GpptN+VXE7czx?7fddYYIl+ z<7R;KVJ={i1TS0vf0<^V$}b0OdLafa+b!W4laf*2?9P|9g-JJGmYsb-sy`J$8Vq~K zM@;L;#ANBW$7ZBYmQt#U&^miro|CwGO5S6QDI!e7dxep*PP-i5fNB}%@fzf`Eucak z8HkyTnS7B2JO|P*#HP+5?}>r1yz*NsB8RD8PJc_p9mz3^s45EKlpqr4q9?~&K`rO- zsSc5p+mYqJF5y^+!N;g1tJK|lF(XTjOv=r(1Gq3hYSwb9_(JSIauq3{C`M}pQESK= zbL@3NU=(R-nT|G^=9(kW)D39-ZFA#8uNVy2Ne1jR(Bdo-Ren5ooy?flcx=h5Ge7T> z*?a+hWi~pGWp=_;aqUMew08~BSZRbP3JjzCc$Wi${Zj)c{oF{^M_5Af2lrlkurT=? z3Y&0nr+Q3)$eY{b~t)n)RTlhVzzUYKgN zv?F&`oh@s#x3L`NZXB~AcBQONbJNwm-5%XP>C{p*A(7IU4^wFCcFUnq8om7Wb4Q@| z6#dkgkn0ZC-l0)9x&L$Qy?6K%DPYosEafTXTT+I+Dg`+UBI?AfZ<2zYpEMEg z2qsb(^Qx5PEYhP#1Zgt7?tfRQ%87`}wLDrK^?B0g6Bz9cb~e#XEG0>InPEwaR0mP` zGKZYnGl#!F>+>$q?m|QzW?WdDST5bYuohtEKtjk}i~(34YJG?Q0uCYg$CL%^72=|o zH8zw0;BVM@_v|~t%EN<(P42QJvP(in%c=YQQE$ZzGYto??1>3FZEb*yE9%@iaI9ksowXHU&U5Eaop{l z1RP#SqS=uHd%inpY3S>!pucRs-GAqB$p^b%-M$q=cFcWf{*1rbmF17NlHomAxQN#P6x{bC+!+f~(u!RL+*hXVLE)NvyZ%q3X$XVf)xi-ecN zIS8?F3pT!SB1QYH@pKmR`#Ach-+g)Y<8??Zv)EgO9DZ2h1McX^C8NLX8-k+B!Kett zno)1zK8*+C{=jO~7UYX$wU?o-n$jN(By^f zX;wURx}4n-fE2xSHL?Y)4gneW=OKGSCE8cP>Yt0^{0rr&(Q$ep>5m4j7@cpxfe3R7c*yy00knj9IC}=yjWV9J5)|Cw3;BBP`Ji`jg%b+JY-Io@h zs$a}zW1V%xf_`Pi>GNA95yu11>Yt>!e}g#O74Kgud4G_XGiKyoJK()=W8}hKK6%jB zZ1L8lV|;_5L#-8fa@)vEKmTW@b;T5$$4$F4NJ4tOBf|O?LRB^OS!xQ31SR%;%PRiN z;m-c&LG7<*dWKCA-!8We5}PfIOi9?-Zti3!N5)UTmbcX_JYW8yutb?|hmhyRa0Tzf zPBz1R6VvpLG?JZNMfr2N&-QxGd*@$8hxcpu*n;C~=I&D8+vN?zutd^TBo(&z3F_^i zR;jO;opc;!k3C<)uS{60jZSVa{Z0L-4in@6qFrGGH*4k@I5y#(CB|5`+tCH-g+l9a zK@K#uD>69AZKdF@#h=i+Hy{T#+7(@Ja|_fntl#FxVmP#p4CKH^yW$LPJ_>R?0G(aa zZP2|nz@>UhD~bc=5P5AsF#Q*GGdeVqVL4cHCkYH!WL}Ax;NPMg4E7q9yK$R`8uzDG8b zNJf1jHq5xayY0E>p}_zIjshq?VDz6GdJd5v?3m#Hsw2UC6Y3-ZGeX@wT7}pqviqdt z-i!5g_r#X~l+@IN2awxXpa%&dy?1M|IJx&)Zy$WME!8=r5c=MiE=!-R_?yxfny-l3 zjB6WmpYw7b4*B1476ZRA3oYG5qwr{LTWb2~zHP%G2<<;UJMVRD>%RV4X;!nG|0sJf zd8FPjTC1f7pBa~g7mg?(R~Oj}Bk#cqX>e&OJOkLPum2psNY)9wIzwFgQHT&KbMx~?`Bu&3? 
zv0v`Aab9v&3lwLJlwKB4*h7(^<8RU!^H$9p1#7ef=&hA(M|Lt7b)M-m7vp?S9RmevO=Kq9D( z7Ho=7Tlo`@L>j9^5^PRG+k_Qh6x8+sY)VF38G%P4%Um%M*e47&rIsh;!~^L-p-S}6 zcw?bY>8!>cvw>n#pnWP}Q)b%AH=vYXP+Mtm4=t??D5e40rw=yeRAy~62Ah7M`AZ5S zRmLij0h`m&Hi2T+p?%I^Q$gBFN)Txj^XkLP^;OKjXXiIXog^#p9u2x*r-d3c6e}M# zX9Wwo-4y5vD!A+qhWxUNn-X!P z*uDs(8h2!t>294i1cM3rtNP7y+jDyd!i#XfJ^1LAmJ5jy!0nl#8K6izF5ydR8tn8ZUMLfk8n5A3U~;CeIo1ggddmL~irHb=KeYosb;uRVb-Sn$76o*SPTEWM zWE3lBZSsI#O!jc3*y*UDPd9Lxb)Iwrj6-sUGn3-Px|t9h3@Cy)5(Bi)XnzNtiu5;< z`mUk2A7u)hApw*HF#OMLi|>{`wecPD4l(NNPcjOW7@=^7hg7nX&NV=-98Vt)E0I_+_ohD-~MI#*?fIhlq@KLtF8N{7`r%jL!xujx@Q- zW-Xk3TUO(PYGt>APEe)v!OicOG8Rf*?=D&HX0#Jo>L`a|0A7y*=Z6r<^~i@FHV+iH z-akRaXD)$WTGu9asY6jtt$(0py~Fv2-(wd^X$2x1asu;TEs$=F{rAzLTSx zU1MTsK|U5hK6oUYU5UQw$(k=yy0wRMIz3=;T}&w$O?238Y-#Ucp|fk_7Y=36n@0J9 zhZ;CEqwVW{-n`0UUevq9b8$Fe+Qo?ae4Z;baF^w(UVy@9ScPQQs5wf+6}FI+D6M+y z`Wx<83bBBj}Nbd+$~C}Bgq#?7)+YpDpH z=>bx~p4R@{%L7K(tvb~|1H+QT5UHQ325pWV4m~u%&`umsC=u;z4r7v+Twn@wDE}0m zQ$5W_v{aXoES3~5D3qG!r39EF1j_#gl-ayce`; z&p}p!&?yPHXGycn4vaRzW60n58jI3TNFOtlxFJ#_gJWEFy`_6oFzE?oQ{2DeokDN+qkAFbDt*uOupwW=?$CWI;3uig;ewaP}4=3HP!0g)Fjd!1chGt&2kLHmEte`wLgG^q&Q%MSgRga~!c0oWd-*JfJ$!BLDy$mamtz^N2MZ76 zW=C<Yh!x`R1s!^p=r{Q;inc-X^~6rOIjaHj7)sx`*gg(oP}-`L{vxf6T@&I=%&R z^m$m=#||!yVx6o(-qO?QC>7UVvCRdP4CPgoTPh5!u_8B%%n6qsft#_Ro^*I;=6D^i zB)ZZhvG|^Y9A2Y1naz3S{8g4$a0Za90`>$`^~0t51P? zN`cM_Xg4CHyVAm-e95$Sj&UqMp7W*BS~^z!2hwR3!@fMjb`X|MY<9ZnTqm#(d-#d+ zs9P*hty3Nz8W@0~MSLls7n`yR+^GSBeL$R$F{7x{?dUF$aM4+Brj7Q@WC^q{X5v+_sKfrFHMI~*4T$VlD_fzBKlg5bm;z+CdS zsc7L1;z+{`2jve&@V(uD2ePWQ!7)I9NHy(afKfLFwgDf^;^l6*hnp?B6qw&z1Ld+v z_#6opaMpBNZlW~P@pcjL3+qILMmaUD;9jIp>r5EB7Os4Ie4VIu9`J*yv#>mG!Qoc# zANd|1Mx+09@0at({xocNcmd7lh{iW%aXeCMFr z5_LbjyGso_>5kPJLOsd6z$B?Wsm_&n+!pd3PaFN_FPA}Y(<}LiRB`J+dpAMs_Tu6! zf=*=Zr>qGN$DjC6%+RiS%R+T$)r<8wzoJzOwwbI2DQnKwrX|OrK;zs1lOSpHSLPz2mdwR2?c^B1#oi))Kd|3R*T&?Hq9?of8!<&y2cQ~pg zR3a5UhsFF%@n=&@)M-?m4zgfOzzfA^2~|m{ctulIq`6YU%>(NC+|#;P$3e2vsf1Rj`Hl z0}cXW;pM=TE5f71u7q8vQxL4^8IR_4U8@1fQ}ckWl4fNQHcS4e})j#WrFf=Qxv5Y#ztom>#q2_xV_g zuUR~!**^_%O&*sXK{V$8QtGTn7Z9a^AS*ShEWoA>r8rETCT3Q`?qOVjn+ah-!U@nX z>1@)qLCp>DUapCTbj7kZEz}3`0cA2c{CC%9&*Li>Fd7Ir{X7#!9iY|7nJhY6netq{ zbT=^=lWbL|3U5^+r8xb@vLp-uJHV{RnegmL9;q8+vxNO11-M=*>)W8T_7JBp|v5w?HP{-tuPx z>l_#G-zY_+XS}9euFJY!-M`!R{{U(v>$Z1a(Aqe`kKzbmxyp9lYw}QZAJ}dH#29}* z0RI2k)HM!tzQ(Mq!=dHAC|UGV!+d6)3ZyhN@MhfsnaEWFFH|Tn47l8gEz{cmQ0_uP zR`azz01w38t$8=(=?(s$2tdTS(k||3(zgo|g(&yDeLe!A{!iIGc_EYIhKgvYkQ|^uMk}aJz-s5c8?t-%JfA_a zs?_EOhrEp_d2XptrGg&(Pfcw**QbTsFMqEGHsflv^R*_5ffP1ip(hh@u%f8;Z!5+5 zUptGgj)2K{nl=vm3&OK|OyGTL<>AIFy566qYM5cA4yquB!(`@Kt8o=u@^KAY;w34o z8>!|AR2=VsvIRLCjJZ(N1t>c%qP@Jx=AA04bJWB1e187+U0^Jfu^(x zH;ESvUcNT@i$i+d$1Oh+sP=}tGwnpx)w~h!uW>|kF71&4a}SHE-K_yWU9C*GVMtNk z_tyq+86}ZcMfnHW-~5J0YG#J+wU#Wt8Vxr82nLpQNC&d>-kCXCJDADEJsuFDbWl6K z8!vfjGo^@5n&7v+LcI{yxq6%RVZ*kbf8w($Ma`a#(AmQwHIU7NldOUX4s5T?YUi$;Csybtf*q4-M@1R(>Q&FdZbfNge zgut%!pR;TehtJMho9u~?Hq)RDT_WUtOKq7)IfEe2MN^%X8);S2zQxbyZ7>sxO7in2MH`7!(r)?UM&@G04+95`gZ0LMf!d znY6UppqO%KpH9cBPA>7*`}K$aP9p(%g+sr}fJNzO3qdin&^|lx7T>U6n-h3TfYzB3 zbQcLtmj$oT(cWMO08rTDpzNnm7zPxE358)pVK1PtSLkd6P#6dbBY?sPp)lgS;cQYU zj2sH149cd4!f1lB>7XzMD2xdTV}ZihpfGkQi~|beg2K3=Fdit34+;~2!i1nO5hzRy z3X=%Rmij3DgNP=FiYA8<9g>13hnFTtlqN@pCP#%PM~fy$pC-qgCdZy8$9+8h)qOCO zOa@CZ1ga~8Ko>%w_aRU+SptfG6fx8_Wm@8fctWoph61ImXNRzj@l0R( zn9O9c-9%w@L_Fl^@EWpIT8xQ^*OE8Nnz=l^b2_57BE?AAbQbiv^2ioR*Fd{d=Yr<0(WRlM;bsF7W{d{e zXlWnw{Y-53;>0A)dhmK8>y8+LV!6AfVCe)7;kP9A!1US$yf+aOe4&hTkPNp#GPyh% z1#Zj`t8;)~gg<5NTdVxqwZo=`j;FoV(&Y{oH2{%8H0*k{b1~11E-r7o=qSHa2Ycac 
z^gfJ9-#RD#>x@)hBc!JHOIT%-qQ(QqRki!h2+8(QYhicQERs2+$*R@;m`z83`nu%S zW$9pHl=!+BUdzvpY;?=NUvx;zTg}SKMV-mVxO?@4X|1;v>$c7HmedfvPI}U`%7HPQ z`UhiGSn*ZAzwV!J7C*>RozsZ!H4S*0CqJAKMgTpm_QJU=gKUZrSnf#Nw znk|b~8gcXP1O4Onv(n3XcrJG^&>EDVo?PK{vSMVCw3#GjHra;3**Tf(cT{n`kaU@TJ&bvK1lZUMn2QYzK+QD^kL8Eoh7vxB8=qUF}}1h<*8A@v#uy zqv$vJYiB8}*g$Bd6xM{4BHvrFptlm$K~Fz`75U#vqk;2(Kpx7h2xV3ddaDX${tRVS zhcatInYE$JIzeyspv>Q(%m&pTpW%lguj^)w)z&G59GL#oK%}XcQXG9eZz}+o<(=86f}Qw-H3)qteKM zfO}T$qN3*L^?|`8ltBNWWo39ctF?QDtn=krVOXkzhb8;W$aI+=zm$zma`897u+e;gEN7XsGHHgDN8ZNI z$LFXga=4n8y`zRV%!S=9Yd^2%;dj$!K^?r-eGW+6-ce>MrPOXVZrDzVm{H-*s&!oL zS8UMN&@ylw(GF>#`-1fz1_jopb3Xikra4?4+6#`wB)SPPfa} zx~cF(j(mzh^j>{*{msCyY@I=Vzk~xtX`B&-OT*XyX3^o1k}r>B_P_pcg+-4(eM9ps zHG1>{y9yce-n8?nS)V)R0^Sq1z7Wj&=vTOJXvwK8yKFr}du(qTO4vSr`Za&rKvuOr z$z$G$JD&V%RndgkHx_;H8r_&yQ8p$nAel`$e=aU!-s#~Hef0Vb{UW7hpY2VYmU$E0 z*r@hFhOKHD{o58Z7LT>H8;MB|ACK5hF8nUzrhoe`eMAtn!rz_*d^Mz@%4Ilid>&!q ztvz}-u<{rUW{2*6o&0@&V8&{&Hxarc#ABq&!n-Qe+wk(%)4mNbJA^2z2+pC?sx(*j zd1E|?z31wwLItLr0H;Y;Sxphw)*YqEnYeC(JZ`cR=G@N zG3#mu-+Fx~GW8pV(N635KfczDhwg^qvZ0+1>>ist`BwKlJXlnIO*5fn_Z{M8(c7;B z)9{}2>l}0T=@6Hi5}AdzkK`UL8SfJLywW`^DgG+{_ILo+uZ~CN%ECJrQP6lsxaz^( zvs{;b_pSbG9nffD8d10HT{PoNcn%m>pe&S5j+aoyDZ8seVx5^8XZ+q6&4mexl@6>yR2uW#C<~HL(=Q@KVvH3Z=)swP^64 zf=v~rkhKKDL?(_%c5b2a@gz1Dxw6|yp1L%wKK!YAJC4SU`PB z1S5C4g#h!%9<>1M$cnF7ZPo46|GeWGx{1cJsJ6xl~0IU zHFD(!w&eTpJlTwrvvVYP-tGBNNdsqbL1(z&GxJG(&N#;QcMDYJW9FPbA)P}hZ|i(a z;}(uX9c><>AT^aszi&$4c~~Z0C6ZHGJE6veiv>hh@wu&A$ZR0l(8aiE6IHU}g8FxZ zu_xPCnWK^ z``9FbG8Zwu>*SA%)h{`M3cV22S&&ggg-*&mTA$e5jB`XXF-$5_%HLjaoFUlV7lRNl zSA~IFewQ3sppIk(=uSexx&ENy*Gd4q@h*ic>h~quM5UhJbec6kCk~~T$2W2?`&2ty zFPNQOP6TX_HeeKAMK4GTdrUH(WkinC+OM_Zz5Pkt%8b|TFp=-QcNBjgn8w;n)LEky z_WNTPPaAPJ5vz{<<5$xP@p`v~T(8F}Pnxs72(Kwhnnvn1U>M>h=shC(j0X5*u(SE$iB;&E|I(fm58+y*7xXQQ_7!Z zQP=IgO~T(#<@N^}b-=xaxzg42y=t>SPcAyk674E*gdU$?fIWX?<94cf@q~fF_>lJ{ zxRBPQ`DW|qw3^$j|At$Zb>p!*rxyE5qr%@W6-vKz3btro6?IEi{#JTdBW|cmF44#) zXwXDoMW!lgc~+x`jLSw@y_`+m*g>B|yAmBPAx;XRzX zXq`luQMM^4&qZmSOwd4LF;xJ*-~76M7j)i}`=Y4PnWm*N3ZZ}&sQq=b4K zVgy7br!uQ_UqDWYaC4ps`Ix&$8VCbilj8s*4VBLd+&qiUuJW0@k2b)3 zM9wR|2swDaCr(`Dep+i$BgK~|AL@S_>V6ms(Tfs$XuRrbMa{$wrv7nYhj;5SZO3gd<1p-Rm;;gA-e(US!#iOVX*regkPt!N}F5nbW$9Ji$d>*wn67cd6F? 
z#YZ;9d{I^5^3(FM&Hm|wpBVu)OF0j%1K)q*yWxhjJBA3X1{^S-17sxSMWI4AJN94A zupFn4+4(F93m|J1l5qRT_CmCExb=C2=5LboYmdrAg+Ai}foF{BVN8bHHJ(po)T3W~ zQN|jQ0lU-Dc7kHIp?$7kQz6<9R3M!w=ulxxSn>RcG)8tySoRa!lbjS$S)TteR!}xh z_16~z;_W=2FmcKPB>%4IsxKTs6(ky>vx(wgVE_6LkfFiAe}F1}6AMQYi&at$Q$Z5T zT9Oa{4F*UP1C$UfxiLbdlFbP%Lzf@_D}^m$F)_S8o&LeXnZaR3zBIM?l)aef-1$fD zRb~z zR1m7VVX4Q=RyprsSWH98xjmgE4?RT?a?BB>+ z?egBkwY)mf&*k~Y)49NDUmpChJ*y%jkLHeiQshhP%lYF~h23*-v{g%DjjI1}zn`G_ z0+v=`8~i(@&TuJAt(JhQ46=3>pk&YJR6Vj*%LC*lLWjNm3a9l@y>(v zkF6U7n#8%ld89O-B5rY9=`0ADWwEO13JGDo=`%DRNMpn$oY4!RTh&hpQzq{p2~^3+ zqw9%7R?9Z)Dr_FQ4U_v80uv;&&!$Ww;(6@ktcre57FC?;=5XRM+liZcNQhEf#&z zQ;5mtrZ5t+0xY5@3MUt{-;TleVv0bMqAsI!*q~1&a|_W}IPV6FkIC5-9m~dE*x5pg z)3A<6w%$#{^i%8T85I+r0Izpz3q?qiIo%QioX6UbJ!+GH!@_4$QSIqwCW6}eTez`U zpr1id(?1$4Z(Fo!@FVd0WCqv7`E{RTPBbJudvczji4SGhtDF4ttWAeCS@x0Vv+uHx z5T^5FVFl!k*XW$0*)|c^(j`7gZ%MpsLK56Ebd4{n2fKIK##(EE2UF8+b92>^O9Fs{ zWrV^Y9XOfZaa{r?A0R4!7bmz8+jqT;U8hJfwCeZJ@vV<4Eo&QXadUAoK0bi#nlCgz z6rSylW&(zKT3Y6{N%-H(-T*)470#hRiWd^spa-(QPi^{hf0mti)o0L2dq}okM>i%d z&f9bIV1GRDf}M&htJ%c*YU&x+rThKw(j`Wo9lwi`VS7r#lNTc;1YaCOtVi%SS(u-eaPY}JWo--O5+=DQoZ$Lljq&_h=v2>` zyBxDnXUA1l{I9oebh)&FxwOT*t^@SVi7>t0AvXf%S!K&D#Gm3GIl3D%xUV3n!iwVT|TId5wRKl*|s9{gljkK zYDw&J(bbQy1?N(kCswfcO!dd?2edaZ#i7J~CGckRF6qyN!MzZVNm1Vew61f_A$Xy ztJBN^DLkkWa;Yg0;QsNFJ z3VB)3|P%xKqrb{91=bHpVU5#Zvu+>IY8_S$C1d#SG zqWfjxC&&2e8_Jy9ybQ1(l0zl6g`2WgD^HoooOk}nWa%G?Uc=kHcGUi!iLpnQ%c?d& z4*n~V6dM-XW3!p*yq0d0GEwufpNN))hy$F|Or+b$AeY$QdRB}8fM0q$!BkUjvHQ{{T4R<#8$ zt^iVYLa#ZW^_r6D`KPeC+D=jN6Fua3t%-=s4+WdlOnr8f+V!samg_B~zsfHLwIP-R%eQZisc3P=( zt|~S*Pg=2MsIRtXBeZ*k@y1dHPKu?rJ;G836ca9J5z@-2^w!LF=JkO8!WDHqfU8d)U{(oFKDQAd ze)kdTwqUgWon90)oqy}l_ucBqeA{Nt?3-$4LjJyVk|CE{Hm2 zCg>=jO-Ghe?UejWagagxc5%J`j3}N7v^}5Sr*V+jn4fJQrHOc^=^4c@S}{k$d;jxr zf$-ByH+^Dw;B&{->h^q=+)&>0SQ|*i)oPPVfvM+_hlUYgp=2w30%!$}!kZ|Mo$M3s z{tRuPS6RGfSjJCfjvAHXUMD|AU&3}a--x!Kpt%y}V2AE8r(fs}Kq_^#* zq#ZdQ!}+@My^RfZ3$PmeMS+CkI1#Hcb}WGd|lp_nCGv*}3g8yc%$*nCEVpM(zDV zW)Tcip^FVKzyi__%$FGOW!cmSH)MMR)UIVjPIMf($0d^gP>snx@nQT0y8zcQ`<;$i-ac87trIqR}! zulHGkh+7iZ@7bR2(XqC%zY9i$%2q`vbv*0tzYVSR0kdJqca3;`aY33v9_#nKgpb0?YGc%G}BNz>ImhRO9 zJ3bB5eO&cbJi-6!EX)Izpz{`Ks0 zLi%S6^bn${zmmoC+0jASvGzVm672ou7~OeAozqM{6fIHv^8W11%ojEU*j`z-$Ft7x zYt}QHz~x`U8f!0cy8Y~pB#hSUTZXj@UA%*|7xD=3_S18h%EQtU8uW9QS9@(-GX~R` z6|-27iw*8O5S5V_#yd-7VfEE&ULJibQ20CxQpeKZMSAVJ_{QKCz&^vKLbG-@8h{fi zVAT@6s#2ay40gTuZl8CdBi5h8udwVcgLW4$fZwI8@^}1pW&0|4RR}dxJLuTHX(q4e z!2fz{ym1Xpu#8~ULj-7zFV;#ydi(z56+8{ujtBWU zP&?%lwk3DeLC298oTS2Ju)ibYB$H$5YN@AVSxAQPm?c@e-ebC=qva%mtij<+W;GCl zTmx3=D63I0%EtCG%Hz8~`9lkuhW#?`x8F8I0fQnj&0v*WZib2&8YFDQ62&G4hu6_%GF>aj%=0wAB|45=lnP1q1Pp68?2Gc;O zH9`=6pU8GjqL@ulC@M(}%AFGq3#|Q41XAZx;1Ax#4b#{zOlD~HL@_Iul?#FclS0dT4c3 z4!YliKzaqioP)AmhfYT6;8cWBcgPz&bh>dk?!GMgwhR-j9FT=k7&zbMksIK;z@7&V0&zCffY}#ysrYWCPCt1VaR`RUbu5Vuq=GBD0 zd!qL=l5md9Q7WMHahe&rYyX+|>Usg+P>zYqp+;|FBj9G+fQT_U$giVx$n=@0K0 zY@W4a^SDszRucdg5Co*PU+5|y0^)AY_7oZu^*X>ZXfA+-SKm8(c#t*8H5t3odV9_?fjD1f4V?5@Gzxv>q{cecE zSD&@?B?W85qBkdV`}V8Kc%c7;C8zztKI= z)33;_6Xpf3LE-4A%mn^T=k9dM=Qdhp;7sW?rPUsJfO{!6lRScmv9&YLU@QY4|J-Eo#&-rKeOyZ>Uu=xuvszBDC>d!(iP6ium#pD#+s|6 z@%;$O16cmdIniruzLu8RB_kk=QESyIDPh_}*V^nVgQ}+TUL~s7$%EM%u^cxYbZ}^G zY@3>T`(Rk3cp*RY{)FjI9V*qj#ZFfJ%7Iu&$H5@Q5M#67d-srWri=qIo0@)BRkJw6 zT;nq!Z4>2`x==Oz+D0L2(z<+&VP>V2uc!sSTyXu$SL?{fIcDJ`_el+KFFQ50IhbD! 
z*|L0GlkW7X?(Y4&YOM^MA)C)`M*IJi<{K!7p47bWp4Cr$^oJzeY-Yarg0dyn;~VQ% zdumo?Ij?JLW!9tB`LSDWCpEGli6NBKF(tVCQXtHz(fcZ4qM|%j>IV;(T~jpSj}oaD ziu}TThM_c)&xKwRe3&wV;dMCD1X6~)Bt7J`MIZRx`SRL}m3xj_Go?MV@I#=8l-ctK z5p;u3@sQz3&WAuGV*=^nIJ)5gHe^_C=x0ab9mI+Fhuw~68q!o1KK`DKdnv;#JW)4g z{rH{9B`ksTc;tgjRPm6bwuX7Vs#eOny>`laW)EHnJNFWk4jRHfbc#$5yu2x3Id z0{r*@@&};hE&l_Ma0nPtF|zBs#hLWOYkx-mPhsEv&*uC6-=d|ZR?SkPt=gkSz08nW zwP~qU@iJ?xl^Uf+OGjeVtUY1{L5QNHwKr9=XuWIHqJ^bF?pKyWZP&UPLMSr91=FIrkN{aY64W43GJ>k4{6zT-`}btb_OVV zv*X-P3SX;lS?`0Y)cW7aT##Gr5YpuNe$noeyv_zQ;1Dt4#JW|n zGLJ=FDx5rT57)FQ{DI$_w6y0clg&wq;B*)qXU_&7XqNu0|W4vEc- zh5<8+poiYPA|Su2=_DY3^84Q1HL|ro*W4J)&eoHvM7f~zJ;5K~+lL|M2K8EjP1m2h zU%TOUArsJl%Ji+j?;wgKh&Q7M2^fI9j>k*FFbN`1-??-Is+NI~BMXJP0W zob(^*`L?>az#eqkSPp7d?*%u5`N8Rod1Izj{f-z|Ppz9&aO@ResJ~v|(HcGZq}6s8 z8yXp=waNDlv>A5bQian+QHMz6jeVRX)8<4iusYq{O&s-WjQHb{E##n8C2Eh4eU5JC z)$vR-3!EQ4+X$xzFRJhZc~=-I9f&P<{nVW?}bUzV-DZ zuTQUB1O?Br^>tt`RGX{oqs=gOl(mW8pzFNY1KXgOPA`kNPA_Z|JIbulb5JJ-`CvA% zw%hBs`z@4_3v{~VE>Ana8SNg`>GeC83dI==KJrF9m^}*qFkPZ{&ov93lHon0vRc9!8J=A z9#XvwhR3M#V?r4f^(y2kgkgTqMwg$4e`C&oLSoXxPGm)qy)&H21cinAC&FF-V$I4= zda8a;3gz_c;~Q8PBezo8U_$UeuC4Ai%EyX5b)Vmrtg!G0SMtrak=CM%pC?z$TUI@O89>lhb!Bo zFdL%YINZc}$5b0eh7P^EyJSjTQh{>>k`X0mo7m#{uBQ!C1f3PvUBe;xRj}qt5fog? zO(aAAlTF!usg=iYYJN1j#oCy3OCoppq9f3C&Ir~>tSCWA`TAvVlUw{xXf0F~9jeyn z1fyVwIplwpDTf%tU7aT-5X|{Z;Qp8UFaj(`z7<1p>%l0#SwD4nR?`iDL@Yom1%K(F-Jk!2 zt#GiWUG`V3sA8q;psyERu$Ot|(c7cZZeXI|%qcy4=OePUkry7Pd2{x3GD%2}UE4=9 zjY0Ek@5Yr7M?OlOA@!93xJEhEE>W852BOWFT|RkV7zEWoBazDDsC^cfA#ZBbtXXPA zrABBZ077jPNP8AAdrJS%DhV1X3XKIMaQwXbp_t)O?>+4rO>wd48~#s{JQEH3Z(N?1 z_o34|E9PfQH0@{O?eTwVsxPKXx17RyFO}6f)n7-$=9;Fxm>b=_G*;&{e{BsaH}3P_ zmYdCmn+?v*w!+OO%){ow!v@drH`IWM_0ZkRq#9{dqxe4~_dn#Tzkx}Bcdgs?$_lEF}#I{sG4+(xg`s|m`W@kni$ zC+!sH1%}!vE8#qEwez1H%GoG|aSW&@ap`dF`3($3s zhh&_SqpOcAS_;Q~0$!&Uo|D!Lh{$Sklx{)9yvP2sFk6;lF7tfo8eyU;oy!Jyu^_gI zZWKWK056i8ody^;hOs6UExBQ)^J;D%n@b()n;}aYLAV;{pvXL)V~upnjh`ns#vdES zjCaiDp#@qgLLcudtE_}?sIqw$ZG@g99;B| z%>Uu#d9vu_Ak27I=AKrtIS(&S|3*)1ah>b|V{ES_mrg_?tMx#8Z;^#1&?mpYy|*v6 z*O5!dM?2oDWbc9O*NyJW2(fUc!RpZokV6YtL_H={HX*-iP?e_Dkd8W|j=j+A-wd|} z$(u>O|K>)a3_EF6_cOz;My&GErOPioE<1Y{@a26$sP64k%g2dw6+S_E8bKb|D>MRH zG{%{um+J*%`xM&hAgU7KF?Mq6J}cotZTO38^>8H39edbEaI5uLGYL3%e`PA5ggVK2 zk(P+tnx=>)%~B%1P*jZkwkftg-P=aGli{mq%I-2Oz&vPr+voU)I=R~%Jv5B({xeH> z-jBwN&iw0z2fuB+#h2O3bdG8vhcfMtRb$qpXR9%9&Z{viuJ~$Tu}(<0jCq$J^v^@j zqRUqhu3HDP|0o6{D_%svy$%=|Z@A4~c!VHgpdU_7qYf;73t%KF1abJD1oJJ5ydjw| zvHQ)Oj8Q6ZCP&(VB&n?vWX>pvD{drAaq2BBMa9q1_UJs=dmkUEXTSv>C6c|qq(Kdn z(@N3l%x=OjT~4fw)T(ltj!UGUZ9s_Wa{!mvshSQ1 z4U+G*_v+H5e_>d&{s)Lh+h>Pq3%XBKsb`6uX6Z+lDa<{d3I=_Z;o_&RYOIr09|5cx z54WN^>r*ail`w??P+^`q-bYI8SyXV2V`IdzDo}hL#L;WQy9l`CV?nFQV1@m0b`-=x z=Y-YZxC&yW*WzFRv0l^SxUR*)sKw5tYAXMd@i$_5KL0hK@GYP)-c*N%;hKw3R7T=? 
zRQ@RRidliY5i1vnm0OF0M~j13i{sYEuWzRkXBna&^O^ya>%U%af9$CYP>&bYh!+LL zi)zJ->MY+rsCYN0vizqY(lwV}`ImP{@v@2nxRce{O^7i+)xM+VGfd(AwfAneBo`89 zhxep%&c&&aybY(9UkvRt>Kc5OY6=U44E>BXig~gGwdak($-d?<);>0WHcp!#2!=#?#!Jw8{MI;Zhw1zq+Q4a+=%%go!|) zqFfkA2Q9NjF+Y#IfB%PVH93zkHFj7UK*S-=BF|0+U)_9zr=*@-I3+ut9U_0O0Yzop z=9WJwSw9WmyP#@`@M;^FuKi;$_?+6If%5ctv*wBvV;^NL;!Gj!P;A_;_L9@|ThZF7 z@c`ZcPHA2TLo?A)WV!Q?1(ZRV^N*Ov{yoV~H##QP9RlJ~REp<>jyLTFZPfp6@<=i) zP1ZZ&>YPJp2zRc;=yQnq!CmdDcHlmg8x7L|-UK3)l;>c4i73;Z{KaL!c1@f zCh6Vu-C`!x9eT8?gg|d!XnGSDhh|_>h=v@4ZawO?1ezs9mu!pmj-43u!~dtw!6{fN zk9Jkw{=RL!JzCSij(zL%5TtUx=x{vMY&fEdrBc_TCP5l?=5Di9p#Q1EIq@lFB1K)z za!)xP$e5L<|1mBHek*S$<$DgWT(9;@>RXx$*-o6{T!$^>{KsD>?k`D7@UDKpiWeR` zW445rs3q>t~Llx0hq0!datZ zM;%3a8Ki=vQ@*(rMi>9L)SKQH?`5PKou*Z8iM$oZOy(UPYN)|yDf|`tOm#8wV~-gQ z9urSAqRS@adRY<2b4E@LxzF7zCjva7*iZq{TAPudM@s!SsUCgDhTJthswc72Lk&`b zEOMqg%h?gf;Tb}`+!AB8?^#_wV?));YVX|6Qm`zJIsU0*?WC=on?GXq$0`XX3FTbp zfm5Bd3}!o4NzABj4>cfy8pBh#(Pjh?6$^-Yw#i#u1NzhtNbuop1$>deq#?eq(WIr- zS1fd`?nC_8hxmgJ@$CAaIsrvEcB!vY#Fr?ROmq2nVv=Qy134lUWjgUdYG zV_SdgyTf{=Bov!&3{cUxT$FBOjt^XW&70aB*F5ouhUW9NXO}reIqQuuFjob0^;3>l z{oK|PY-Z|K?Vfz)53eCbrQkdIm6b9K2l(6I8jm-TrrmSrhH14|#A zV4_N68xRo2xlW8Rw-Um!EZ5dY07X+%G?@I?4FUHN*PXu*qvyDn+ZGxuB;w{;YV@6t z>s;%WS+0hbzP9SR{`rdbQ;(062zhjUkuU2C8^&ir- z&^QUxA}8{?>SBPCK+E_bL@Qt zuijAQs*H=P0yrfT8*b-ndB~h;J+w&$WRj@B=Lq%WT-JE7@FuTTlie4 zi#p=_x)1$Mxd+(@zmFAsq7zjxs?go5SjnUC0WHoOc3rAtm|Jb@f?Ld1V5Bvg&Cv~v|-uWab zY9aJV@Z~Bvjm+uM_q-9#BJajUR*teh4d9_!k#JluV)8S{wLM7n$?R{Qx)N9WTk2?0 zpn-vUBH^ppn+QCe`WGigab7KPcLp#X!yQ;tAQZpt-lup}v#kApC*v!-?b(%5=T%+| zY4VN?nxe$WZKLhPL6xzr%f1byGT?T{~=*l#+8T#Ur+1D!~DO3^5?3FijI(H}69LuQ;H z94}g1zQD=*gKrRn#pO;t>z*M4o(qp;uVkWB{a<@)F(Z{TIxp(NGhoy2uOEGcMaXr2uOE#OAOrt0@B^xF~H26m;d|a ztaZMfwa(uAx$f8uF<4k@Y_94kfRCetg^h!yBR99biJOJ18;_fb3(~0p<~LGLK)ik+#~;JS;0aI>+ylifg8;%N1EP6gf1WDYnjA z?bSD{kyrprV8Np8GX=@nS{F71ZJp5eNn!);C#g^pWf~eMt!bEW>JpZ>T+0J{=$6}DKK=Rh-GU7wp!Y41W=R%PpRMNn{y$G zP3*#-(wE(GA-M4mX}O@Z@$Z+9;{kCer^nlPPK@_Aj@Mk~W!Afj>)N%xBfQPO8_#3) z1^wo1Z7WufyYr72h)}T13Gbh5YkgqD14Y=GNdQ0qs0mDf1HtqgQG*bYtJ+Tw2}TO; zR{bc}yZc>;+RLtoVa8`)QKV<}s$}|i@2^5mYMCR_b$d(-el!)$;fW>*tNXY$f&$(` z(Oh5@w4%M3MaQ-mYeGUo!(^}CSJ-d{MCAl4`vvR1|3rW=Gm}61ME|$X+T>;@cijC6 z2@rsG)IoLlpKBG#q%ydC|8mG6Y9!IDy}d^3j?a3x?@ornE}iT=+y-Lzxd*3DObDhW z3UXkqAELG<8{l3g-kKf9EQ5=-G!YE?ZHDWqI!yjaN~Jf&*1moqCozvJowW_Qu}P83 za)}abDF&8k>{Ox=*DCx$N3apZ(4EI7CKQ7C5x_($om~#LLJ>EC5YF{F57sXgz5#;d%m9ESY-VvY`>Ci4s1 zal&P4J%O6-F<_3n*Jr8#u@qcr$qr_){_<=fr(h`C8M3F&6j%z|_Gl%Ui;6aV@3YZxz2KbMfc?^kFo0Z@LG z!Cd?Z{x%P2`UC2oL)FgT&3R*!?RF}cp9o{_9MOQNjM1?^$i&h!5#T2b6so}N<|U8M zATYeiwlBt?>>WEUm$WG|;0tBtCZ@>JSKq7vGLQqKRT#p3qKDkIn*9nBkpR8_h-5&5is5*IxQrjpog{Cm^Q^0Jt|ns-l*$e99?Vw523aPj}NDFj*{cb>2Y1ExQN> zGW)@nrj{SwEN8WJrE~#jcpaiY+27)-Rt{GAPm;QOUW?LyeiIx6eRvf;^Q!{(zyWj3 z{zm-|fD-8gnr{#^`@qm#_T5KIhZ1GZ;g9iJk_GYf-LEA*>?M6*(b=~H%aymIvQtH+ zB|nOa?OcjV+462PfNI(Ewt+m)95F^ueS5`=9Qopha~*+))*>WAwLL+?0|{jM#7w?7 zrGf~rC+_{xX{=3~-(vSw4_YwqL(|i+RDQ(F9^OsE5RI2b#gDGvKCjCB{v~^NUEmwK z*Qa|oZi{#NERa@NZfhg4A5N4F0#TI*ipJF4KLbnZXlIXGUQOS>vvk9o-2R>2$-fri z6WFaSTnHYcb98>jH>)$@b?^#}Y0kQ$<$DtPi7fbgdk8c+s28SyQmx5dFI=O8oXi+% zV>gfC$d)nMRC$X$QD+gmG+5vEDCk+@&mX9tHOMzUDA8kkB2a=#AMd&k3vz$OgH$yv zXDVz+o2S{tJ>Txiw?60(vaFQEKD2!dKJxL!bRdkiIE}yW?=avV_znCd5IyY}x=sI0 z;+fJ^lFx$5Ap(e$DJdyHYU^J`=%sdCT9VX#TqiC{tRVmDD;GD&sv{JH{6X9M8v`j*5)0 z|L?5v&)4Je)jW8>avWY~UicHPr!u@WbQUqrMNG9KGFn}8)!OSQrGzPIeL=fW0K7PJnV8ptV+|UmpQO6Xc`| z4QUzYyZO$A%%#b{v*V2Wuw?V&aVZ@&0F*t5VOZbCtdiphI1I&$9hb|z$rum2(V zvRd{-dcz$$R`5v{czE{K<-N<@1n~&U>fV3_9OD-j0kZ>eN)!+d2fW=6w9yX)>IaoS z|21l+H-W6jna;FBNZ2qt4tOn<{xOiwWjG{j_g$Tt 
zIiB?y1IlunZ%FpbgJA8x1M|Lz>cTw9)EUgg2<&8cmS zH0+ShEz>aTZU~Xw}kX;&)OnEt@z@4w@bOJyelDrdX zykO2eU?d&dg3HqPdxhA9mx+1R9ElR4$i=+vt>bh!Z#7WY_+(r(KuO#EC?(1aa8L@J zutX|kqlfLBll+CZ1o8LBlh)Ji2RZxClfHIuCA#{FMhvzhNW6UWgD7>EC@M`hvglub z+!OE2^DM_yE@i^pfaKkwM#DUi%?!euhtbv0N+a=(qep+gcWGlPTQ`x_f4U2mQr2pVN zK({0dTM&<(-3MtYg5`~s*D1w8wKzajfwi-x!mJ7sJQD$mJOjSGL4cM4LHH3B>QYP% z)m_-dmNuiv~D&g9Oo)TqqG*Frl@#3Hbz@ z+G&6T6%@e5waFoQ=?8fjpmlu(1!L)Z!l|!7jNCjtvJ!e?&;yytA26`hJc5f9Sav6T zwD{sFj0ki3$P;j?aGH^i|Eahms*4Kr{#h7=@eo0tjA`k~Oz>MGh~NhSCSj(>8)asm z5Y|*k(zV~#-xieEJIJ=$60CW=W zfTaeG!DTA2+Ap5-PYylhp*ts~+iW-kU%PnoE{BnO2AU@6%3Se-bt5)U`oNZM$<1J} z0&K$?F}3tUh5}a`PKf^B=k}fMOX$?>){2!swwR804F6L1Zx{cvrmy~Y(5*$IV3SQi zE0;E#JC7P%{3Ok$2OP5$p;xQY(DF&+J*?*I*Br75=@o_rlg<+FjJE#OzTzsbCXLJc z4*UH<)h|GH#UtkZvO64GNGP32XqK$dl8}IDKOS7PqKwH*UaDz5j_>_hWHob^onhKq zTW-Hd>3D|}o#}`J=Vy2`fwEKeLZ3Fz#uhk_58bW-Ws~g;qFe}L7_hhjR|Er3t|$0} zwO{SmMT~qL{th&(XreO-t!uh#VEcu4Afl`_qXEO92Ov9p32H!Vmc>x>*Raw6j zPi+P^=n=C!Gbyb=$6o{{h0g!ZpL+R~OgP~K4pY4<7DlJ!nSW5t&10M+CAqH+M- z#HY6?cg>J%0?<4HH9S>7t?j*0IMBa!{ql-`28wxbL;<$FOsd!=nKZ?qj_-h-iESf` zOF$d2I%!BAIV%saaCu*+1H=FR8|9Z{DQxk?ZU}7qOH{ci3jxF*f*YQ|6*~ar5AdNN ziNY4gNFwC~ECpd&SoM`EGz0{t_i>$v!;wa`z6_yQ5J_v^ammuu^R?4tBn(hqeVCVD!{xn+69*28x>{%%`KoNl&}K!25NnVI+TLW0Qm70 z!`oMiVee8gdLVVcb?W9@#Ft6L6tcxVz&f-HIN}3L1u2IUMX@Tyn??j_4_6m`X(xco z3Bu|BMGb!pPy&O^%B73yez-HT>Q*j%s^vNXIFbRHeK7aQ6x4N0Y=@bC7qAZ zGoII-2d)2XJ%fkqALl2-O{S8wScJ9sTH4o+F^8rj1K*)j(^s|uoe4IOXAIkVZ#=X1 zHoEUZjp*}R9TDu9H~s(N+*qIVmh#c?;{PU9&pujRB0EEIG|NI^D4aCpp^VQ}THyLm zWZp|#Ct0i*Bk>xPkFHofkN;2ews?L_!gt&vr%M*e(c|@*)h0;rOVK;HBK_`+5|B7N z31%k+^j^l!zxs1~Q9Aixg1cWT3Qy$1S&IwgqRv@p z>O6a>rIwe{M=2+_M@=o889gm!TU%Tz;~aMUg+^vneEqbIhVwD?&KF~D#|}eoEz?%N z+b)Lfo9*inRN=9u;sjpq-Kt)`-N=~MbWCW%I?}!U`gyfOh8nWraEuir8av3i-VB8J@jWO3Yfu=T0+`U-l0J+sU$C<46!UgRB#KZQj8QgE6t{J*goqL%> z^OK~DAnIy;mW}8O-C~e{OQ=q5Rr_m8v6-73=x*Q z_r{pg&6kf|3cz3x-x5yjxFzP}`9eDJeEu$h!3j4-GyA9I?CKrYv-x?_Pg5^-W#@UF zX>(Uy~fuiI}o_ge*ycM@?C)`^S4lbb-r?EA%uah58V$d#Uwv-;4-I1< z7g6WWvlK#HO?HM&|pg=cd~*A zl*NpqF3tyFn(G%TE*`+yi2QCj z{fdoXA)>c6c@l4ES)MTs@+8c35)I=z*d1#*iXLVa>v~LBS=c4(8tSv-=~+v^hL6qPf9j?4KJi$ zobvq^=GtcDvEuYJ+5TnS^{mEjD5sa#)f;O$dokKCm$;!ATA%wq(2Q6|C~K*}R0QVh z(xXYulbi`zv^vj-1P}wBDN3bm9C{SWtm_WyZ~o8|vMI0SrBqRhroM|9Fj?mluEwF> zGsP{98qdFN@o`chLbsT6D`{BeNz~PGE3=<973hGfbC$k;AIBl8@Pqp~ZsL!=;t#H9 z)*A(t!s`P9MvEZbkh-(itv};D1lNUxoFh9u`RTsQzA!51I=@Di%%k9RNr|OIiDikX zKKhtnZVyufCJD-ITK2PV7!tiyLr8o?8tlUsb-56Gx}Q5NmpYkIjdW<3BkW~OJ^^j^ zaP@ys$ap;mj}|xZ5WzWx#qv*{-|5dWG7;YbjdtcL+2^U2Fd+r+#9!~#KiX+fd|oLO zEbjlu>K+hXaNE;SwEjD$PRcdQUWTC%ZxXs4oN^xFhd=D(CsG{=-Xt+HK^8L{k7ToP0hV^}m{DhWlcdfUO#o|mAW7W;Q&pnkgfF}- zL5_8sa?#eGAk-2ep;;#(+`;xj38ed5N@no; z|6iSo&Hl@1p94sk7r>%uV;_vgIT+vxpMnYiO+y{CqfM?s$lLqzSJA)BQDBbM-xiv*30ZTIRdT6kS2AD-1VzbbC};xDF@zxDTB9%9c8#$}6u$(~L5u z_i)F=bam5VJKyVJ1JS6 z$q`I(qiugV-D`9DPnxHcgr$a;Akje{c6O zi2ja}{a7tE_XQj8A}4K}!_o2@z}TC_+G#{26o86m;3h1;O3ecs0$W!GOHc7hQUD^u zxqSPUo$!<1H=mnJ2yPnCdvGxpw)hk;@CY#h(tOvJoQ`5`rswNJw!ZHxjxbl+IQ_k# zY${w6wH^Lxu{#-!e_68xmhPC^$ct{g8=WlB4d(BvZ z%}~`6LEzi=IpXMjnK|q&)Vhb{`mjH0ZZwekkmWIuvw0^O;yd(t>gn+P6gea>$BtE& zH%l5iSUqSiZ>L_d?Zk0=ykyQ?V z?};sz`<-Q7*}R-9$IkX%`J~^xeJO#0()^8_j>-FXl6yfn#*eZ~=PqV2UmU?uj=m&1+|2e&U|>-&pCYlA@HV6Ou+EUA^?ii03f4q7F3I;nzi+a?tD^qg8-UsIa_6WuMZqoSSgV6)cKX@eDSNDY8rBxis1fL;(DG`#NN3!b4!`Z?;ulDveKoUs)nBbquMD zg?j>)rw}T_7RmbDQ#~s6r9`PBNq@2c!e?{4nb%dkcNcJ^cDBpMX&rR+Jbr}55a=VL zidtL@g@_Kk^b_oY^I0keER1kmf0_bT?(G6_A2*CmT!zxlP@Y&2*-}k_UpH4Kph*UM zc&gn45dP~Ib~5md9gxr%uV#A1Ds&^`46xb(TMPi=?Cm5P4I_(CFcWIS`=On%xCrTCOh5CM1-NNeiH`Frz7 
zKXWOeyc+^5S*lo$rM&}khajiE4EunWJy2S)2z2*=%YlsRfvb8^YHqwtPhF>`e z9IrS!fZ?b?P$pn^4%@iz0h|!Lf-h1SvXlD%g&|GUdky!&j<9qbrjnHe3 za}4K2u%s;G?EhwwR+IJ7m{g$oki(^P#i=I4^F3FCN9Q83hBu&e^*)EuH)rMJo!)1q z`*ls64hn~`r)?%DnK@bH=tlPk&*vpLx|$c^5NYr?C0Te#rSpEWFB(4bP#@%c($AC>j^*wHK%bD>nqVCPO{JiX)JAg!jJaq1Pa!oix2ozC}$zEXEg~X zn=W3F>xM2^g2V)XD-{)!YG4HYV-RhNKMGm9Gsio>btT(@SZocgIKr@!AEBYd_gc5c z4D2PP$Ydwxgo7DUMG7qp(nFQ4fjdT?tfVzkf$|i0qXS)~n?-aFqr0p$n<3OmfvMmQ^AeZe`2=QXfqpcGPIy-)XQPlu-#;Lb%>} zB;#gv2&dDE%)Xk;9Amqg<=ks0!nb$Rj+2fU1DSIZ1;G zMkpq<4D`6}HzIOh4(%;it;brss?1*!zpFT%uO9}9ZWniLJGF5&;*sZ&+`R`Hv$c}* zZ$z*)eCNMN(6DQnN~_lr^=Q_x@GNM^+BTa8{+IpE12=?o6I5Q_2LaauF&E6az=3dt zsE|0I(`v>oQ&a@aJwbD(83gPB$cvjqd8rMsjlN%c0!7oVpPw{~b?Sm#PA{mF){?SV z*2=jP`bSW+=##_}b^=oY$LzeGM=A4!3p|2rf7*AEvzh{;3P|XvXRJ&)9vUOL4q1Ge za&E38rXTVqJ)FxT8Xm8fu zjV^}70;d>N^Uj8;&w9 z>czrcch~~ej=nov^(rNZw4ofVJO%QoQ8?+)`A%2Em3z)wkQZ<6yz>@rwB;_UOOZW4 z=N>k?5>I7;1GYx;pNRv2{ikM#wkDh?XtkBs1tXtuu@9 zz^#l4cd*_X#dWlz+W~n zotXB`{3LqhN#$g$y%1%5YiI8OXlo1Gk$}HI@cY%kcx6TrKAG2be#qCcTU@W>u`lUf zb3C+}qUbb2w-O1$0@%tI&7>ogM1dB+cu^W?xO6^Y5cJ;g-FyV43Ak%jM&A;N z^1}^&>u2}6^fYjDc@sZT5@k0ii(-6zgBB>2$2A*NZ9u(Eeq?aifyE210HvIdVEAvr zwl}4-KMEL39h%T`rn#+DQ>p(TTl2mCVw>gu!+|VVNni0_-uJxj$Y0?;YX+K{rc7*< zOE)W=P!k+J4QSAHbUuYXzZNlUn02_Ao#Vy&d{%n{B6rfV;;d9*yXCCnkaNXt-zfOR7ohob#=-O@#Z3UaN&=Bgt_2wUFOZB(;yX zGK-)5M&)gEgxXf6edXUTY%8I)fAjweofw7!KE4c3D_PnWc7(i&3l@@G`Vd=YJxY(7 z*sPR52PY2Bl}Ob*k>+fp7AmR_jsc1KEo9gEt%eyChG@&P^1egij#hYwbTJVT7ZQ#U z=VKQX-M+rKO`;WM(42Ncjfz|a==!PVVRsGP*nNbfG~r=IDxF(q6n&?E*QwL+`=`sT z5DUe*l37LVYEE$DyS}ahvNQzWYHQqv0cjU2s8gohCr?Ml*ni0r|`%xBojU?9_SZsMpPuCzl#r)w~#?gTMuJ5|}EDi4EtIcSIRIi+}DHAA^MOw4DX>t}cY} zW=IzBe2;eRd^5zKWul*MY!7dSKWkCL8Z0~;Of`7Fu0xE<)iqP?nno8M zR(igG5K4OVx>))a8!X~V5*%?i77aVlzjf|0uFd{F@`Cv}I`sXLKobkd%xKsoZk2f) zTG}F{OHArDqM3dz#ee_A1sjeL{8fg$nS7tSU=3itL`3Cyh!>yQwO>~7RA?sAVX)r> zea1tj`unh!<~u)RpX-zytAy1ZeAC4H7qdELU2Y2~eE{7^tpaQ}ST7k#2GsE@kZs^8 z%>DJ~wQlEZXo5;p+S?2u>8oTBtLyGwzO%^N$7rN^@!>IG#1Gacw9sZr~&TCgXkmJI%CBwwF*YUNyvdxYG;e*XT2h`XN8U z_##Fe)G>4hzhz{18bL=Zu9P!aCGLU^ha%VzUBnv0h2Qs`(-NQR{(zcn{~XR%x}no!Ir30|7xS&w*K(a zezag-aU;($=!_UCAL$!@^_FSqXV^}=_Xg=sw9l6J>+MKbJ9*n%My*+`h3UD^<@$!? zX_(Ekm&@C25@o59H0)!0dE2WPR~1~*<0z|0MJ*cin|C#hx_7c`tuN6-Yk+?@u;2wm zB(LK6*#pCAfLQ|^5)C$0H~HxHi(OcXrvk!PnsElIbp~2V?SNEU;O?0I+RlEh{U(Wx z?K1oxQM{v3Rd7NWJGQc;|4({qK{3Pd7nr$96(NyXm0warB=A_Juc)u1sJx@R)(rgP zz&q9N=Mrp>mSJwz>$fWq&E(+Z`5C>~xRcIrCdJopc=7As?w9oCsKQo^4AuAG5g=Xk zW5OFpB!_e8)Y4J^BcW86(-w5fca1kE+koOhTSpaq`GIf2g|a*yn4b*ya`;c|@N;jLY@)8lP;AeJ37u$Q?PtD`)p`Ko%KX@b-*X+2|>B)ke)qfwFq&K|FEcIg<4 zI*%5Ldc4TrF1Fx(o&ZlhBELL$l$8KFDlMcZNh(oLEY5YR^vJ`}C1YG5EiEYyCaQ;d zWGNTz^#Kz5#1I~V_zW424j4b8_HP7pjX`*XS~nrS_Lkw!%z0TGbffxG$*=T0%OoUCVyM4H1X=YlW2e}R>tL9YVtn}=&XoybhaPZ;YlK% zL7L&$`E$Cz>)Le0v1k^es4ebsKGM{v7I*!|vS{1L7nr;VZ+IK`S@(G|xv+Bn^Ae;h z(1i!C^5fBgSnPh&(aFJ0O3YzU-D$liOann^+SY!cNIjKI3z{ss&x{5%%>*+Z0M2@sVrvJ zdE(13ijy5U!u!M(v@qc7xHjBIp}pnrfta|>){cA759F^)?~{B33%w)Mi}oFg=I0&8 zZfjj&v=98N^lf^Sid4&w5Ni9&xZA=xaN{m*v4r|U0!ma@jc;apuGyw zDsWY6_|gHjb+=l}P|!!pSkOTm$>)t>7Q)2nBy%yVt)&I98xFkc1ul6}yG|mY2fyo{ zAQeeKhfXfQ^>xW0A7IZ|Bry^Im|JeYV$_{Jr%kwA2tuRNZzjezZ+bYB#TIQ3vU`q? 
zRVQkzDUcyQfj-#%JBF|N62EkHIML=Q^UUlm;% zB4WZ>2eL+SWfUe8Jrqq6dvytd)g=;YZYRX1X$_bt;-g*_TA)0O0y z-#W#)20w;C@YzvuGH%}71m4`b?Z3YxhcTy4e&fsyF*1}j-WwEyrgGR&8t~ykIB|Mw z_U?LXpMM*RD7f`F5;$8qV6O(P)M!u8>2qyPc_6v!U?QD&w+*d!xZ}5h&yj96r;#_e zaw-=K-&0$WUkuZ$uS9@Gje;b}c!zW4Rr_)w8s!nmEdcbQ7?K|St_93?l& z+%=dE1Gn?ssJ9uqUvYijI5_2)@hguR*B^5E;s>;;#fx<+f8h{u{yLr2v|06k*u4H(#IxSpt;dP z&>9K+@fKOmE&sbx4w?*z$>4dTkEH*675n|pW>pOoUeC1=y0UR+`I`&~_T>FBM2n-a zBPr}{66Z}U`mPao=*JXF){Hy9m5gW)jUUC%$~}tIFLfz7)Gpo{_^DsR!|eCK9;Ptj zlu1-S{1D0wRWoD=<~PgDKCw2SUHUDf2qxhwj)_cSzmvHKmwkdSgP+sB;M>IlRC*F0 z0^IV?{ryn+uZyZ&jH}0mI3Z zq8ykp9$({z2MGI3@!7VWgQdQ%hM-BTx1f@P`v@tH%1jE}*RBTi7wU_ax}~qnso$_x z(j@44r4mO_p)5ARC)g3M8C5%jKZuCoo1>|k3REy`>NH@vWL8r!=stCeg0UZxo zope#RU>D`X)kj{9Ar@fwxwu!LQ{dpL-5!W|@s{#?R-QJ7?>4AiJzcH4by1OB4SIXr ze@Sb!P(`YAq2rNUNA-@fO4lihGYj~RJ4h7gvm9o0I_2PE^wh=SuOULZ?j~fu26tn0CR0GK z_jdTgksMoe;pPt?sZGxSD@rjKorjv6|s%Y9GjP7045*qygu2!+1 zq;-$L(-cU1RgR_y;SI-had&z0f*-^H{7X}i^P4odxEwyYkUa8b@}tp$!c#m?6(V_T z4t#q&89sjAgwOABCI6$fn7MkzLsoxEhG<3X-LjB-<($K<)SSbHw+$*?IR2X5&48_5 z^O#a^0P@A}t0m>Z%UtrboWJi*Hlzv>vsi%Iizp~TXoBYroE#XnFDpdyv}t7K{tRKv zVaqly===aw68ZGRo_N1mjWj1Ba0lkWT7!rKWXA*W(BxC?1W@M+st|b4)O3BDk^N-j zgGhS-a}^_iMEHh84cGq!CTznP@FY}&BQ7M_JAh~XzL)Z!kbL^jlpMwhTMxKVPO{x= zIH8pXko?l@(g4Q)*X%-iK-a_EW&WPUg-x32E4MPA7U@CF7U`iLgFSl^7OGnw%C`Z^ zwuab=Sl4?~ydC=YSp5^R5QJ1R9 zv0HD`^x3S;lG$4w^zkw{sd&mi=~`?gi|qxRNyLjQ&1e28GBz*f^1 zQ5(MMz&_!VjZ@u5uOkJBOB*+M>S9K1;;Q|Tt@Yv3rls*A9($!M_nw8l4udo-YsxoR z5y2Tr^qCrc{Jfede@@k%cC4JTX2gjngYgJ>zh-W(ggbxBIwz|6&w*A1mwWoGMXdDT zWW~5v_Dj|}MSS3SQF+nS%oWPL`Qw#h{bQ|xiKNz(m)l~Sk&FIW zWx=#W`0Qy|Z1DGT;u=Zm(7$q3Tc1B8c_t5Q zLyWqT3+EdmGaMXNvvCEIcJEo%ooK(vPU52^So?iwfsRUdkeaeScB{lV`;=(h@a|h4 zaju_p|Bf$5_X10{0;9I=$+yT*eY@cio#hB&F`*iX`1zd*^s_n#0OZpTIR^6sS|5BJ`@ z(HA{9y4wspo=u8*g>1;V!P4Go$^FR?7PC8+l8gP@xM{Zn>vheZyi(0G!5fFzL+*B) z;nxn>ePXTp_$U{P_uH%9+j#5$>Ni&<%}~}mzWiy2Y_UQ&iT-$t!X=<(bszg;m$9XC zw0XRfAZcElB`i|~KZ&P8ettB(eGc+E5|v?Ym933c_{izb7MM!5;cB=+g{_@B=)9f+ z4g4pMqV~tumnhwC_*14mQiWCPrgiM`M{At#ta`S1`NqG&f5V=FzLlL;X>eDj;Z{@N z$eYiF&T*t5S4@XKkps@`8Es)L^J8 zR*)k%isaPTYXASf>vu6U&Jq@%jp-QQdm0=$3?|o|7c-T~24S7lkD|Lz&_v1zs)8zP z_P4cSe;-o{IGpiLJG9dCOB!7i_|pCN=eGN+bO(jhz|-l#QTJ@oR|xvwcW>7ty;VjO zpE3XRNtlI|rJ0fMpY28YfDHa%(H2m3iPB{Bu0tHl;`L+o2(T6l*MB zu%gyj*P-!m)pgg`78~JT2Az20<&@EJ98hE${r$N8{Xw&x=&I>X;_k1AZ%C=*n3Zy!fEzD6yj^Ri8f6g$%NcR{TEzje3 z+k8pk{?mTDq?>bP+=1ObvYeivj@$&&OIXNHKmjKOB>wJNLZs!)*l@nh;1 zNJx_B3uR?2j4|jeXW)L%+?Iq>_ z{X0JXvl0aVD}}_^ONSenaWH966%VgbTDT$dSLiaA`y_4QvQOJg^ z;}%lk1xv7+OlY1d|FBN7GW6$<1L&Hys^3XhSo254UwXNRhYss6iai_<&nU7Hoz6>B zG~`c^Y%QlyuDIZ>x;T9t`-pJ|14(E7Bo>lOpM6DgnY#_HaW#Vz>Inajy%_L2*8oH} z;^Z&WylO?I&%p=%usB=yM0Cx@ig1j5EkWDlni=43`FO)^z`DXef9>Ex*|tI`VGiy1 zW69mQe^*(F=jBS$k%$35To?`(18|dfhS<#j+^MJs6GZ%!*$((i6!1N$^@;F9iM&F$ z+h3Zm4RC(KauKE*%{u!;OXTiU3-+dWma6^Hg|5QAnt|OP2bF|KccQ z=D9zz2yZdkxZ>})dHBEgK*0Hi?*^G>st+;MrnnaJbJSs?UCOpfp3cgnR&1BN>xsS| z3pUNmz0zUn(m-$Tb!h^?hd>|gOK&Sf^wF{o021`IpWq8o|KGo%nFp87XRmC=>)hZ( z2S`&~w8he9`r%ifA^R~F<38o<$U8*7dBXblev)zd(nbq&Irh*+INxW}UXB5Z{?Do* zk^I(rZWH#loQW)`Wi^LYNH6(A$W34IkY@OFwTsUO{TQE_@62zD-gKWGwK)8n@i5q! 
zE}1_g7F^?MYG0-qfH{*bBaX=iw%x>R+dX+rn}gnqzts(Q63Cs7_K`WfXj0uKv0+1K z|8CrPnBjTjYNP)wTK@fxd```Xv6(A3WO<`G*@J6RrX!6&nieNKUsBjzp7!P`Z(7Aa zl!ie@ym(D93%$!`eKQ$rV?N2lnVDGUjC#2m?p(f7UFkNpa8lewa8~MT#8Pw~_HNnA z>Q~c6lfN|to$2$}PTIS4KAaOfN*d;%SC4z>0wZ0o{}XM*!)}?DmVNKZ$W-fCU;+zM zT2vMKZ?!^+I$-lZx=)fu|G7QN_Em^UbriYC;iiTBRvuNmpzlL}qIJZm zwTd5zWuA}t2dSIfHa)cPs)bq!7DZpC^!$cd53?YumEiS<#*W$1(i&yW2ECc`FwEj@ zjqst8>ZG{s-2B-A&7z|>nMbjCEH(;vHt)v23nYE%#{MI$D%TJLmT~2TCGw*e{;ju= zOjzy2a{ERcx%=br$NrReaM|ZO?fu4Xh`64Pv#^E)7iTNIVe=mqdI9kK4(K%r@DmuU zr^4<#eDg^6vL8!_;Gl05FPB}(5K~p+G(0_|DK$V3T>!n?#}&N)9+t4LF{Uscf5u2t zl-f`4sY)Hr2as=h=wjXbWy{|#pJCs3Q>wMq#xg&hp32{5AIjY78_V>({*D!oE!m4e zZNi#bywB4)SP5-Cpm7SECw_12ivCKX6aC5d6y0C_XJEiWS`g|2fb+z#A&Ul+AjA9C z5sLLu6+N+hgES-@7EhT)tBBU|hLj0`!rWxQNIbt5hvkb+y!8gP=MtnxOgz;QEZZve zIz0Oy=4;lW&C5Rzy~K~|y=A&=GVhMxveSB|MV`0f$5I7kMV|2y#-23hP?mRuMYgg; zLd9JpLq%*t%UB|mI%(gtRMRW4Fi@kHsGQ3d)!<EGAx*fvV`@mZGxiuylWVy}O24_3;1B_H3Ep`LW_PX z%C5z=O*WGwosVW zD6d%i=h%mKQCo|Kca@VP`}E6wM{fAkQYeqb!KuR&Y^zXTyTo(xp!004;RalD)oxuy z8gMnyo20x)wUxF|1Wloc2s6qc*_EYe3o{~f_OIE?h%kEie&%ZC^#%H;X3F0ITV>p# zRe+~`wz%PRa;KElvza$_P13(*cfESM;hCl-RYH`7!?&$;@!-?exszr6z`<41;FkI| z8(WRd#x~oSCu!8E zVH?}FabnxHZJ#q=-oN9!m}hS0VxB$oti9Gx8{Ra5;3ci}n8zT&j#Nk8Jf7!Sl@9t^ zGVXWv(3-TXxwQPU-dLWIXj`~()}jBjsQ1zu^3aNlHF0KOlXdlQj?Q+ zj3&TLP;s>iWwN!*m;=zP*d}=cX7=HNs{MThtg;M%87s8T@e{vPPV@ZyJ)1@uu>I1! z#%aR6$%8C6Kfta1a`5Kc*}GnPR7!pYv~IxF*H4CMZqSD}t5yLk7qfO+SYPows6M3| z5PiG_oNB240R{B|jlY4&rjN!o?9bq)il}|z$Nyj(8ffVTU|HocwI;`xZL5-wQ=4zG zR;yC0#{GV!73-*iCRVjFrmM(3G(Z}D0@|o}D!;f81dwbM$*a^%Lx5$d(wk20Z!Msk z_`v;l2`ERYZ$jii8ZvcN!W-Lo2XziA9$o^Fe&`b&35y84Jxa6vCZdeCcoR{cKcx*- zdfl((3p>=d+Cy!n*VnbwoT+sckp|j!6-a6<+m{=T-BXv}K@e&Rk^j-C69GcqW>eC} z4eXZ}yQix~K>$j>>iL=nEY0x&bu z7Cy9}+x}U_cFA&VWdFNft&w{nP?4}c*P*x}=quxOF)#WI4KX>|vHmt=--MLb~4U+z_jxjHJ(ZiiEF2-en9x4|SmGCbC^H=o!eJt79Uop~)kD(-n1QO{& zlW@E-oh3mx?#*qR8r}@awRm3mYmex1-SBle!rGz%HV5+oijn*&1Ch# z#L17!TpB|{$3ZD8l8gCU=MOZa9Z~oEa!H3=9p`Ad74#&jWb_xs<%W^(CHfcNi;M4_ zhRYx@#Y^RW2~Q^#)Axl`#t{+Esw3SURdY#3c|9DMqlCj!eBpD(wiBYNshz}5bi|g{ z2V|j}%(OuKazm@+l4hk?ONYRxT35CAFTz|ScKX=UzAJXc;-WCqriu--Y!e>>B(eVf z+(D8}4{4H1N5E7iBMG=NrRFY+)Z8eENXHU~ z*a-R)^mrgAZU6)Xx#G+AI0*a=N!_K~LK>I-l#x3)xwn$nGkz~cMK>CcxiO7G_<>)O z=q`fm7vku*AkX<}t57!SQDg<{GGl7&g@b<_=c0zx>opQYpkIO1ZBH7EI)#6z4Kf%N zQtW9Igz6AwhT!swKZ7KCvlsFfdq0|e1pGPd&ECg3)TpMg`A3+kUr5!Ez~^c2eU7pF zBxUV(MNNGWTkRqrfK2FpUChDlNWty^(Y9VkMxq7|?56?n(WI4!tzXyMbn@xWZ$cJc z!&4#ip?0-IsOj<_ursaNdz7jpa!n7qEm^18WyWL$WKgE?iXkJb zX9SPxrCi#6u%k_x^gaw9*I(d32PrCg>&?rD^#Je;T~1hDZJcI;*0ki)zQ{T?-LBl<3)0wH_&LeDu>$sCg4BqNgbzkw)fv}YUHZ-F30d5+ez~+%iLV+C z_t?B6H7qC@#rIH~jw)t8Mw~6Ob)KkvfM^Ikm-*Z?$ve3>>?;nhV{Q;C)i;i6e&$(EvTnQx5C-# zPkL;Dwf9c>dUiSrt7fY;D&L68s>R(inwhkHR@u@2MNw<}pErxCCNc9C>A%x19&JgH zfh#%IlgR;pg>8od9Pr1()5fGDlNyAodh&@y&CLF@ThpXv-XPJW|1WN{Tz3967|-wRKsW0Ug8!HhfGPa#10K+Tt9%*tr4i1rzAM$GCKvKIGqJfm;WU9QF} zi+pbs(JKR{6lK^Tvk#Eglho7kaW9}jEI2=1uP99xbAKUD0IaoCDC@z zAuUpR241n=T9o&tKpFch`3vtG2mi7`UwNZ+D&}iosSbO?)_Qp*2xC{VTUkSmIsulM zA^TztvVA4TOT6pjgc0ro-rpJn+#6wgpubz7UFS!`@rTUejVezf1Vd0P22+{%n1Mn! 
zkTaX~M)^10TI>SRkAR*V@aCBXoPo@-EuUV%(jML0mkYr9kdLYs#5Q&e@QX4>9fQYD zq2wA{Krg5p7@J1A4Qsi2aSJF%0@7WLKE3}35MKiLXD<+WC?E~6^d_DN*FwN!PfwN> zVR0#M`ifyFzj)LwIi)6`TYUf4nK-(wEd|YE_U^epg;&(V>@d75tf$Ab!=Z$c_d)Wv z_g;oQJ8k(@Fu&fEoFQaY;SiB3d?Z!}E(q04o1*#hu-;S7wMxVHg*U*t=1bR~9kd@O zlSGrWS3HwpAl%c_MzTlY`wE0F1+S3IIETC)?Bd;~0_nmLb!2h#7WbAE>2wuS`Dqf) zDA}K7tM@rO<2Aev5&ABJ`FiXQL-rWWL6L!zk|Yo6$HHI#j#+?S{XoTQ={LUPN3HP( z)|rYIdpCZ<7YApKE+lnTR4@@KD?#~w0!9qNvfuwAz{B5gVF#kJt@1fDv+?l1Z^QUNc1lj+~%K=w|L-q+-@E7Si2VRTrmtp%*zlw)tv zE$5>ZDBLM3Bdm4l3+3t>)sF{!I?+F_jGYXta9%_bFd0i|qLn|nHrw64ZaH(3JSe0q zJ)RK~oqHF@9!U@Hg=j;0`EGIRx&VuUGc3Xyq%-Vbtuk(`v=V0vs6;dZ## zHa(v3ChbWUF3)fj7ky1Z*x7kg6j2vs~ka*UsLwVr(n4E z3+f-R57wF_{34{;4MOPlNGan!Q+vqndznanz*ka86(1cX3@`k48Kzx2>hxDk1g)&) z%^!V|_^$**UTKcNBI`NNR8|(%pFUKwh_7pQ&1h^FD7-Sk3Euv91t=Ti({S3WXSr zW5%6x@rM)XIJYWCe?p2`WuvtJ?ky+;Kjk{*b#ydA==!N?xo0LGFOG%3m_r7}=d)EH zmg;T(UE+M)r>GnIfT9=?mZ=?@v zcafAh3V6osRcKb%J{;y=jpWvD#Vm6>Nto6hx~a#_Pv*ovUx`$C z<^Dlu*r(ForK{k(F zdRFhU<)ZXz5lte7i&TzYhNhqg^b3b`d8uQH!)x}4N6>A|(P{&PAE?ni8HfxqmiHEm zr2-_^2HpwR%_$Pk1rB+y_fel9JqN0TsQB!i-$W2(M~5iBDlzhWqzEiUQq3-FH9Pdh zO0qRT`=O-MWz6ZG^&@qqXJ>C9`|_6VGN^4#S)va8&uAJ}c1JYB`fJp##K}exeU~2l zvVYDa3G5Jyp%}^+0humOHkw&NX;k+hRW>Y)L59!Ro8OahIW(~022{={Rf=#T%>@Dq zc-=yGzOmDH6`|t2T!i%2uhECyIOa(}|H-l!4Cb|^s{ic6W~aGf<;<0qFDNL_wb&1e zv;I(9tX-NWgQ4&ZdRd*X@lb!JMPRb*k!Fo(Kqr{@jwrma;&{p6srhHnk)1qmw|X_V z=9xGBh~jTJEeh1C0Q%`@*1huC)LTtdC9j1sdSFE?3K6&{N3IG}QH!%r83KUBXJO3Y zXRr=Xh_DWhrdLU#_|V^f4c2*eH-QiWyfw407kE)X)DwBDKX?;lC+h)kcfS0A-*huH zlVRJZ6RFw7nMS8c221T)R4DafsuQQ|!1r|>qiDyst>fhf1m791l$mI_?en21P`L~X z7`~QdIKMpX^!tR9-@x`EJes&bG-ax-w*lX8sClG)K%7`sWFvN~&cgX^x&f!xLya`t}H8z85X4Zpp$ zMYuD}2ef<|*?>ykK8Cr+%5%uTjmcBEq4Q|MKR;mj$x(4QVKe&SB86ZgVmoa13jDMV z#$v(V%C8%JiY{-5WUhnXqKne`09%>^aLS!`Aju6b4X;v#o~GhnYzl*I5g4N+b2#_l zm>50s9!phdmaY!e^LX(oZP>OV^GXIffOG?>-%qH1k6DK5hC+zB7J&zo`@J3fwiulA zhebO3HnS(e^87+{q#?yQBH6JhC5nAQoJGNnj#yCw%CkgIrTGnd4V$$}43l;Xrq-eF zm5>!-Bky9#V5(ttq90oPF?dxV6|vnS9$4XjnV0ov8}r2|Kt%qdvOU0L1?b#0UJsXV z1SSi6$DCjieb^QF{1X!G_`Bc7zm(IY2zf(vwWwKoJptROS|GKp-kzUwLRWF!py_wk z*TQ)BAc7yrBjWxgT(&0oX76{ zCk)UvgnyAAit=Viy#db$0$+$PH^L2R3-<}Urg<+G&Cb6fJ^JgRuQ{!wY;y3ECaNkq+3M$7%ev$Fuie& z1T}oeQRpu1-&WH7!Xx)(trud&zTFdswmzVXpCDaxq3@TMkFg^XrPssVS(bNnxE_?A zV<`9>pPVd#OyDM|?j0w7T44jEvp2#za<3OpQm>hN|4e!Q4S60&?FkW&2~hxu9a}!X zeRryvTbo|gtbbI%bZE?lG8x_wboknSPQ@fc0PC4k_|9?R`7uSkX(3pDmmm`iSMX>c zYaP!_58E%R&+Rau9$$3idZihRa4EGY-BIi3$3U1$7EX^&*Uu_c1M>!!=Hr>WG9&j3 zF&Q93xY|7d3G6A+s>9zRuQgg@C#nIDY>`>2XDsp^P!$PrCg69R+EffZl$ejVkW%ZPlISwT>jM5#n01bNT6@qH}aCtJ3Q>FVlYd z^or?&617HfIwZnV?b}t(zBH_`Ah0#@>WG9V7X|1{&IpP76X zJ4_?xG&zM{H>oTnLP*G)5lt7V7=#!hNOpAq;wq<3Q1epWj_lYxTS&Z)B8XMB( zEPJ;=#H0U0JNKfn@4x5}i`uf~WemGbc*WXi$4_A-clq}K22?nh?$2HyEYlW{dJHV` z^A$lrXwN|Zb0*I5gFe^A=f0YZfvFeT;G7qImTcZL(y27}o1|LvnaP|!ZvvQ3A;b6^ z4LXy>j)=_iA>Mvvd8g+;-MqLFqDtjLO_nNfhZ@s(5927XzT*#wK3cp1e*ASXK;AjU zH3Xi*e}D?k6wN-wa*kg`E#zmRpD zz2`BmBqyt8#fj9z+$nmGPw+`*JeblglyLExEiep7aGiwsqXDx~YGKLRFfm*MnMw7B%*q!7D^^yVJtgJ( zC#rP8S&^9G1lgtASan(`j=zSbv%h4~=OX0Huf2x8fc*68==06TK|(*F7rSN;Dv&}J z0Q?qnz|jVpQa2{a1nxG#<*KJU6OR+3V}JwG6-Y(=6-av`cf@E{cW(Xb`=+Bs19lAz zi3r@BUo1^^@6vQI%I8HAUX^~KlEuqwuBfDqBC8pDlj0R%|1Us&v$S;ocQ{iEY>|U=E zFh(MISbs_i%jgnOf7dIa%Ln)RA!RgAF`y|+3gwf>xxbQM;si2_YoDouy?McDNvG+J zl8GArM(2*-`meA8((YDNT!?3izCy$@Riyf5dRZ(%<#xR4bxLHleslmZ*sJdgp5MBvsB zK2=h!+2#OtrSl*=rbp1*7&^1u;%{Sky{e$!v#K5Ms~Nxk#T-rjh0f?=p=b^`m`N!c9_1Nj`CvWo}2ft6QLoGq*!URmDvDHX~ z%@&afOgaIPve72d_ex@Vxob-fTs!aGxzQS4A}-8HY_%8)SrzxKCzXzqqXyfCUGvs$ z3#o*(UTS>RARoZK%XWa1?mKORVa&Hoj(4;f#z@H>V!y;2Y6D9vD6`AHoleH#vsXN^ 
[GIT binary patch data omitted: base85-encoded binary delta, not human-readable.]
z;NQxB5l)aeLv?mn&;OEn?Eea3{s&j&-`DHL zR9FPu1@ylH*W>e8%KvL4{?8Xd{ztCGfBYOp|5-V(!$TL8e>(PK#7Pqi@-O7K5&wbv z|7^;CTlv3%?-?cl*@djeq~i-4NBaiIN9%W_jQR3=`PfNoQ|MekFJGIQ*hJF5L$`#b ziQ(wX-*Fs|LM8@ z6G>qI53a8Nrli8Izgtt{ zBBlHP!u4M#;IGr*+EQjeqXYO0?D>BgCcTbS*=&>EfX{>YGlRe0Xewu*>EFM@Uo4ij z;GahrN_1L?KO^{0N@M&pt{;D~P+;@_1gQoBPb^a7+Hy5cK>QiOzetfJXSh+*Rqz)` zJoul5A}P%OWpn-S&j0p^bB=C!CfoAFu0FuBG~QCT_8d60Jj!w`_IL6aJ{0}?{1V=U zGiB(-6Dr=N1KH^19a+4l=i*Uh&2XOAvS8G^x;xKj?l9ns$65St4+7m%11VHmf({|(2$0eme8GB&>qik=An+KbThdQY2- z_Q_H3!jola#BeDua!?WI8yLxp@|J+8U&WSwUqpcDW8s$mZ~g^hUOsObPOFL9eWYdFt{2hq5B_br?eY836y2wm)Y|94gu*?Rw9*wIJ<(#xNUsDL zza8ggCHx1?I{qpzCq#qhY<-kB@oWK_yKbu`@1r>&f59V`g6(;raOOOVeDx@B``Brg zNmVE)iXLN8O#T%pF7~$+Oa2RrzaGt-8uJS}_3dchw9CiQ=^HQeW}JQ%omorq?%4Av zI;(UaZ}x^76w5zt!S8(plq57*l;!h4X~>fnRn|11I$LWgj~fHZkCa&m|Nem3lx>;w zO%teCt>S4KqtVK$ES`4DWmH!@oJT%Lp>w6(d3xPG)X?WF&sew-H8urWs*)N&)#)EB z)satv>ODs+HRo%={0&VF&lxm?6wLfe1m z`cI`ZX`uaQXcQ6Ja-7uZRh+_KA``Z(|CznRTYdd!(pRWzxF-J+0nPvFz(2y8=KRJ#0X#Vx%FO*4X{4R>xmURH^g^MgxEyiT~U6za4*uZ}3*fzbLU& ziNAQ=4r0)7ZlO`vcg4bwO5PxRySLwB! zl7Bn@Uo-v%s(G`g>GTy){+Yl(8=r?01i~r&`BL}%zigdWr!i?v5Pv4{cdGt`BHI_i zND;_&<1ge3L;^SYciv%Qu2xqK@xRgd=MzdbQO+sHNl(y9Wt{eH7e=99+e8KQR$_{Xi~pbAn7w4 zO}R@x6uAM!28NV|k) z`n?CTVn0W-e)zXv6^y@o39I1EY$Jcg>qZ3blluY&Ua0FJ}+GmPToq7NumrBsNw~s~jWf7=B5Q7>s{z6S*2wIic2UJ{I+*{1*i diff --git a/awx/lib/site-packages/django_auth_ldap/__init__.py b/awx/lib/site-packages/django_auth_ldap/__init__.py index 88ef52114c..38711b9b4b 100644 --- a/awx/lib/site-packages/django_auth_ldap/__init__.py +++ b/awx/lib/site-packages/django_auth_ldap/__init__.py @@ -1,2 +1,2 @@ -version = (1, 1, 4) -version_string = "1.1.4" +version = (1, 1, 6) +version_string = '1.1.6' diff --git a/awx/lib/site-packages/django_auth_ldap/backend.py b/awx/lib/site-packages/django_auth_ldap/backend.py index 3dd15a51ae..db6fe2ea8b 100644 --- a/awx/lib/site-packages/django_auth_ldap/backend.py +++ b/awx/lib/site-packages/django_auth_ldap/backend.py @@ -45,17 +45,12 @@ information will be user_dn or user_info. Additional classes can be found in the config module next to this one. """ -try: - set -except NameError: - from sets import Set as set # Python 2.3 fallback - +import ldap import sys import traceback import pprint import copy -import django.db from django.contrib.auth.models import User, Group, Permission, SiteProfileNotAvailable from django.core.cache import cache from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist @@ -66,7 +61,7 @@ try: from django.contrib.auth import get_user_model get_user_username = lambda u: u.get_username() except ImportError: - get_user_model = lambda: User + get_user_model = lambda: User # noqa get_user_username = lambda u: u.username @@ -91,7 +86,7 @@ class LDAPBackend(object): supports_inactive_user = False _settings = None - _ldap = None # The cached ldap module (or mock object) + _ldap = None # The cached ldap module (or mock object) # This is prepended to our internal setting names to produce the names we # expect in Django's settings file. Subclasses can change this in order to @@ -131,6 +126,14 @@ class LDAPBackend(object): return self._ldap ldap = property(_get_ldap) + def get_user_model(self): + """ + By default, this will return the model class configured by + AUTH_USER_MODEL. Subclasses may wish to override it and return a proxy + model. 
@@ -149,8 +152,8 @@ class LDAPBackend(object):
 
         user = None
         try:
-            user = get_user_model().objects.get(pk=user_id)
-            _LDAPUser(self, user=user) # This sets user.ldap_user
+            user = self.get_user_model().objects.get(pk=user_id)
+            _LDAPUser(self, user=user)  # This sets user.ldap_user
         except ObjectDoesNotExist:
             pass
 
@@ -171,7 +174,7 @@ class LDAPBackend(object):
 
     def get_group_permissions(self, user, obj=None):
         if not hasattr(user, 'ldap_user') and self.settings.AUTHORIZE_ALL_USERS:
-            _LDAPUser(self, user=user) # This sets user.ldap_user
+            _LDAPUser(self, user=user)  # This sets user.ldap_user
 
         if hasattr(user, 'ldap_user'):
             return user.ldap_user.get_group_permissions()
@@ -198,7 +201,7 @@
         username is the Django-friendly username of the user. ldap_user.dn is
         the user's DN and ldap_user.attrs contains all of their LDAP attributes.
         """
-        model = get_user_model()
+        model = self.get_user_model()
         username_field = getattr(model, 'USERNAME_FIELD', 'username')
 
         kwargs = {
@@ -322,12 +325,12 @@ class _LDAPUser(object):
             user = self._user
         except self.AuthenticationFailed, e:
             logger.debug(u"Authentication failed for %s" % self._username)
-        except self.ldap.LDAPError, e:
+        except ldap.LDAPError, e:
             logger.warning(u"Caught LDAPError while authenticating %s: %s",
-                self._username, pprint.pformat(e))
+                           self._username, pprint.pformat(e))
         except Exception:
             logger.exception(u"Caught Exception while authenticating %s",
-                self._username)
+                             self._username)
             raise
 
         return user
@@ -343,9 +346,9 @@ class _LDAPUser(object):
         if self.settings.FIND_GROUP_PERMS:
             try:
                 self._load_group_permissions()
-            except self.ldap.LDAPError, e:
+            except ldap.LDAPError, e:
                 logger.warning("Caught LDAPError loading group permissions: %s",
-                    pprint.pformat(e))
+                               pprint.pformat(e))
 
         return self._group_permissions
 
@@ -362,12 +365,12 @@ class _LDAPUser(object):
             self._get_or_create_user(force_populate=True)
 
             user = self._user
-        except self.ldap.LDAPError, e:
+        except ldap.LDAPError, e:
             logger.warning(u"Caught LDAPError while authenticating %s: %s",
-                self._username, pprint.pformat(e))
+                           self._username, pprint.pformat(e))
         except Exception, e:
             logger.error(u"Caught Exception while authenticating %s: %s",
-                self._username, pprint.pformat(e))
+                          self._username, pprint.pformat(e))
             logger.error(''.join(traceback.format_tb(sys.exc_info()[2])))
             raise
 
@@ -422,12 +425,12 @@ class _LDAPUser(object):
             sticky = self.settings.BIND_AS_AUTHENTICATING_USER
 
             self._bind_as(self.dn, password, sticky=sticky)
-        except self.ldap.INVALID_CREDENTIALS:
+        except ldap.INVALID_CREDENTIALS:
             raise self.AuthenticationFailed("User DN/password rejected by LDAP server.")
 
     def _load_user_attrs(self):
         if self.dn is not None:
-            search = LDAPSearch(self.dn, self.ldap.SCOPE_BASE)
+            search = LDAPSearch(self.dn, ldap.SCOPE_BASE)
             results = search.execute(self.connection)
 
             if results is not None and len(results) > 0:
@@ -449,7 +452,7 @@ class _LDAPUser(object):
 
     def _construct_simple_user_dn(self):
         template = self.settings.USER_DN_TEMPLATE
-        username = self.ldap.dn.escape_dn_chars(self._username)
+        username = ldap.dn.escape_dn_chars(self._username)
 
         self._user_dn = template % {'user': username}
 
@@ -566,8 +569,10 @@ class _LDAPUser(object):
                 logger.warning("%s does not have a value for the attribute %s", self.dn, attr)
 
     def _populate_user_from_group_memberships(self):
-        for field, group_dn in self.settings.USER_FLAGS_BY_GROUP.iteritems():
-            value = self._get_groups().is_member_of(group_dn)
+        for field, group_dns in self.settings.USER_FLAGS_BY_GROUP.iteritems():
+            if isinstance(group_dns, basestring):
+                group_dns = [group_dns]
+            value = any(self._get_groups().is_member_of(dn) for dn in group_dns)
             setattr(self._user, field, value)
 
     def _populate_and_save_user_profile(self):
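With the change above, each entry in AUTH_LDAP_USER_FLAGS_BY_GROUP (and, in the next hunk, AUTH_LDAP_PROFILE_FLAGS_BY_GROUP) may be either a single group DN or a list of DNs; membership in any listed group sets the flag. A settings sketch with example DNs:

    AUTH_LDAP_USER_FLAGS_BY_GROUP = {
        # A single DN still works as before.
        'is_active': 'cn=active,ou=groups,dc=example,dc=com',
        # New in this version: any one of several groups grants the flag.
        'is_staff': ['cn=staff,ou=groups,dc=example,dc=com',
                     'cn=admins,ou=groups,dc=example,dc=com'],
    }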
@@ -616,8 +621,10 @@ class _LDAPUser(object):
         """
         save_profile = False
 
-        for field, group_dn in self.settings.PROFILE_FLAGS_BY_GROUP.iteritems():
-            value = self._get_groups().is_member_of(group_dn)
+        for field, group_dns in self.settings.PROFILE_FLAGS_BY_GROUP.iteritems():
+            if isinstance(group_dns, basestring):
+                group_dns = [group_dns]
+            value = any(self._get_groups().is_member_of(dn) for dn in group_dns)
             setattr(profile, field, value)
             save_profile = True
 
@@ -630,7 +637,7 @@ class _LDAPUser(object):
         """
         group_names = self._get_groups().get_group_names()
         groups = [Group.objects.get_or_create(name=group_name)[0] for group_name
-            in group_names]
+                  in group_names]
 
         self._user.groups = groups
 
@@ -645,9 +652,9 @@ class _LDAPUser(object):
         """
         group_names = self._get_groups().get_group_names()
-        perms = Permission.objects.filter(group__name__in=group_names
-            ).values_list('content_type__app_label', 'codename'
-            ).order_by()
+        perms = Permission.objects.filter(group__name__in=group_names)
+        perms = perms.values_list('content_type__app_label', 'codename')
+        perms = perms.order_by()
 
         self._group_permissions = set(["%s.%s" % (ct, name) for ct, name in perms])
 
@@ -670,9 +677,8 @@ class _LDAPUser(object):
         Binds to the LDAP server with AUTH_LDAP_BIND_DN and
         AUTH_LDAP_BIND_PASSWORD.
         """
-        self._bind_as(self.settings.BIND_DN,
-            self.settings.BIND_PASSWORD,
-            sticky=True)
+        self._bind_as(self.settings.BIND_DN, self.settings.BIND_PASSWORD,
+                      sticky=True)
 
     def _bind_as(self, bind_dn, bind_password, sticky=False):
         """
@@ -684,7 +690,7 @@ class _LDAPUser(object):
         the credentials, after which the connection will be considered unbound.
         """
         self._get_connection().simple_bind_s(bind_dn.encode('utf-8'),
-            bind_password.encode('utf-8'))
+                                             bind_password.encode('utf-8'))
 
         self._connection_bound = sticky
 
@@ -693,7 +699,11 @@ class _LDAPUser(object):
         Returns our cached LDAPObject, which may or may not be bound.
         """
         if self._connection is None:
-            self._connection = self.ldap.initialize(self.settings.SERVER_URI)
+            uri = self.settings.SERVER_URI
+            if callable(uri):
+                uri = uri()
+
+            self._connection = ldap.initialize(uri)
 
             for opt, value in self.settings.CONNECTION_OPTIONS.iteritems():
                 self._connection.set_option(opt, value)
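The hunk above also allows AUTH_LDAP_SERVER_URI to be a callable rather than a fixed string; it is invoked each time a new connection is initialized, so the URI can be chosen at runtime. A sketch, with invented server names:

    import random

    def choose_ldap_server():
        # Called by the backend each time it initializes a connection.
        return random.choice(['ldap://ldap1.example.com',
                              'ldap://ldap2.example.com'])

    AUTH_LDAP_SERVER_URI = choose_ldap_server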
""" if self._connection is None: - self._connection = self.ldap.initialize(self.settings.SERVER_URI) + uri = self.settings.SERVER_URI + if callable(uri): + uri = uri() + + self._connection = ldap.initialize(uri) for opt, value in self.settings.CONNECTION_OPTIONS.iteritems(): self._connection.set_option(opt, value) @@ -743,8 +753,10 @@ class _LDAPUserGroups(object): if self._group_names is None: group_infos = self._get_group_infos() - self._group_names = set([self._group_type.group_name_from_info(group_info) - for group_info in group_infos]) + self._group_names = set( + self._group_type.group_name_from_info(group_info) + for group_info in group_infos + ) self._cache_attr("_group_names") return self._group_names @@ -777,7 +789,7 @@ class _LDAPUserGroups(object): """ if self._group_dns is None: group_infos = self._get_group_infos() - self._group_dns = set([group_info[0] for group_info in group_infos]) + self._group_dns = set(group_info[0] for group_info in group_infos) return self._group_dns @@ -788,7 +800,7 @@ class _LDAPUserGroups(object): """ if self._group_infos is None: self._group_infos = self._group_type.user_groups(self._ldap_user, - self._group_search) + self._group_search) return self._group_infos diff --git a/awx/lib/site-packages/django_auth_ldap/config.py b/awx/lib/site-packages/django_auth_ldap/config.py index d01b7d09f7..693e9faef5 100644 --- a/awx/lib/site-packages/django_auth_ldap/config.py +++ b/awx/lib/site-packages/django_auth_ldap/config.py @@ -30,11 +30,7 @@ Please see the docstring on the backend module for more information, including notes on naming conventions. """ -try: - set -except NameError: - from sets import Set as set # Python 2.3 fallback - +import ldap import logging import pprint @@ -56,7 +52,6 @@ class _LDAPConfig(object): of python-ldap. 
""" if cls.ldap is None: - import ldap import ldap.filter # Support for python-ldap < 2.0.6 @@ -154,11 +149,12 @@ class LDAPSearch(object): try: filterstr = self.filterstr % filterargs results = connection.search_s(self.base_dn.encode('utf-8'), - self.scope, filterstr.encode('utf-8')) - except self.ldap.LDAPError, e: + self.scope, + filterstr.encode('utf-8')) + except ldap.LDAPError, e: results = [] logger.error(u"search_s('%s', %d, '%s') raised %s" % - (self.base_dn, self.scope, filterstr, pprint.pformat(e))) + (self.base_dn, self.scope, filterstr, pprint.pformat(e))) return self._process_results(results) @@ -170,11 +166,11 @@ class LDAPSearch(object): try: filterstr = self.filterstr % filterargs msgid = connection.search(self.base_dn.encode('utf-8'), - self.scope, filterstr.encode('utf-8')) - except self.ldap.LDAPError, e: + self.scope, filterstr.encode('utf-8')) + except ldap.LDAPError, e: msgid = None logger.error(u"search('%s', %d, '%s') raised %s" % - (self.base_dn, self.scope, filterstr, pprint.pformat(e))) + (self.base_dn, self.scope, filterstr, pprint.pformat(e))) return msgid @@ -184,9 +180,9 @@ class LDAPSearch(object): """ try: kind, results = connection.result(msgid) - if kind != self.ldap.RES_SEARCH_RESULT: + if kind != ldap.RES_SEARCH_RESULT: results = [] - except self.ldap.LDAPError, e: + except ldap.LDAPError, e: results = [] logger.error(u"result(%d) raised %s" % (msgid, pprint.pformat(e))) @@ -205,7 +201,8 @@ class LDAPSearch(object): result_dns = [result[0] for result in results] logger.debug(u"search_s('%s', %d, '%s') returned %d objects: %s" % - (self.base_dn, self.scope, self.filterstr, len(result_dns), "; ".join(result_dns))) + (self.base_dn, self.scope, self.filterstr, len(result_dns), + "; ".join(result_dns))) return results @@ -349,12 +346,17 @@ class PosixGroupType(LDAPGroupType): try: user_uid = ldap_user.attrs['uid'][0] - user_gid = ldap_user.attrs['gidNumber'][0] - filterstr = u'(|(gidNumber=%s)(memberUid=%s))' % ( - self.ldap.filter.escape_filter_chars(user_gid), - self.ldap.filter.escape_filter_chars(user_uid) - ) + if 'gidNumber' in ldap_user.attrs: + user_gid = ldap_user.attrs['gidNumber'][0] + filterstr = u'(|(gidNumber=%s)(memberUid=%s))' % ( + self.ldap.filter.escape_filter_chars(user_gid), + self.ldap.filter.escape_filter_chars(user_uid) + ) + else: + filterstr = u'(memberUid=%s)' % ( + self.ldap.filter.escape_filter_chars(user_uid), + ) search = group_search.search_with_additional_term_string(filterstr) groups = search.execute(ldap_user.connection) @@ -370,17 +372,17 @@ class PosixGroupType(LDAPGroupType): """ try: user_uid = ldap_user.attrs['uid'][0] - user_gid = ldap_user.attrs['gidNumber'][0] try: is_member = ldap_user.connection.compare_s(group_dn.encode('utf-8'), 'memberUid', user_uid.encode('utf-8')) - except self.ldap.NO_SUCH_ATTRIBUTE: + except (ldap.UNDEFINED_TYPE, ldap.NO_SUCH_ATTRIBUTE): is_member = False if not is_member: try: + user_gid = ldap_user.attrs['gidNumber'][0] is_member = ldap_user.connection.compare_s(group_dn.encode('utf-8'), 'gidNumber', user_gid.encode('utf-8')) - except self.ldap.NO_SUCH_ATTRIBUTE: + except (ldap.UNDEFINED_TYPE, ldap.NO_SUCH_ATTRIBUTE): is_member = False except (KeyError, IndexError): is_member = False @@ -414,7 +416,7 @@ class MemberDNGroupType(LDAPGroupType): self.member_attr.encode('utf-8'), ldap_user.dn.encode('utf-8') ) - except self.ldap.NO_SUCH_ATTRIBUTE: + except (ldap.UNDEFINED_TYPE, ldap.NO_SUCH_ATTRIBUTE): result = 0 return result @@ -442,13 +444,14 @@ class 
@@ -520,3 +523,20 @@ class NestedActiveDirectoryGroupType(NestedMemberDNGroupType):
     """
     def __init__(self, name_attr='cn'):
         super(NestedActiveDirectoryGroupType, self).__init__('member', name_attr)
+
+
+class OrganizationalRoleGroupType(MemberDNGroupType):
+    """
+    An LDAPGroupType subclass that handles groups of class organizationalRole.
+    """
+    def __init__(self, name_attr='cn'):
+        super(OrganizationalRoleGroupType, self).__init__('roleOccupant', name_attr)
+
+
+class NestedOrganizationalRoleGroupType(NestedMemberDNGroupType):
+    """
+    An LDAPGroupType subclass that handles groups of class OrganizationalRoleGroupType
+    with nested group references.
+    """
+    def __init__(self, name_attr='cn'):
+        super(NestedOrganizationalRoleGroupType, self).__init__('roleOccupant', name_attr)
diff --git a/awx/lib/site-packages/django_auth_ldap/dn.py b/awx/lib/site-packages/django_auth_ldap/dn.py
index 78271abbd4..e234303f5f 100644
--- a/awx/lib/site-packages/django_auth_ldap/dn.py
+++ b/awx/lib/site-packages/django_auth_ldap/dn.py
@@ -27,6 +27,7 @@ This is an ldap.dn replacement for old versions of python-ldap. It contains
 (often naive) implementations of the methods we care about.
 """
 
+
 def escape_dn_chars(dn):
     "Old versions of python-ldap won't get DN escaping. Use with care."
     return dn
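The two group types appended to config.py above support directories that model groups as organizationalRole entries, whose members are listed in the roleOccupant attribute. A settings sketch with placeholder DNs:

    import ldap
    from django_auth_ldap.config import LDAPSearch, NestedOrganizationalRoleGroupType

    # Example values for a directory using organizationalRole entries.
    AUTH_LDAP_GROUP_SEARCH = LDAPSearch('ou=roles,dc=example,dc=com',
                                        ldap.SCOPE_SUBTREE,
                                        '(objectClass=organizationalRole)')
    AUTH_LDAP_GROUP_TYPE = NestedOrganizationalRoleGroupType()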
diff --git a/awx/lib/site-packages/django_auth_ldap/tests.py b/awx/lib/site-packages/django_auth_ldap/tests.py
index 0bdbff5957..ffa1eb8b38 100644
--- a/awx/lib/site-packages/django_auth_ldap/tests.py
+++ b/awx/lib/site-packages/django_auth_ldap/tests.py
@@ -24,22 +24,22 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-try:
-    set
-except NameError:
-    from sets import Set as set    # Python 2.3 fallback
-from collections import defaultdict
 from copy import deepcopy
 import logging
 import pickle
-import sys
+
+import ldap
+try:
+    import mockldap
+except ImportError:
+    mockldap = None
 
 from django.conf import settings
 import django.db.models.signals
 from django.contrib.auth.models import User, Permission, Group
 from django.test import TestCase
-
+from django.utils import unittest
 
 try:
     from django.test.utils import override_settings
 except ImportError:
@@ -47,7 +47,7 @@ except ImportError:
 
 from django_auth_ldap.models import TestUser, TestProfile
 from django_auth_ldap import backend
-from django_auth_ldap.config import _LDAPConfig, LDAPSearch, LDAPSearchUnion
+from django_auth_ldap.config import LDAPSearch, LDAPSearchUnion
 from django_auth_ldap.config import PosixGroupType, MemberDNGroupType, NestedMemberDNGroupType
 from django_auth_ldap.config import GroupOfNamesType
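The import hunk above replaces the in-tree MockLDAP class (deleted in the following hunk) with the external mockldap package, imported as optional so the test module still loads without it. Presumably the test cases are then skipped when it is missing; a sketch of that pattern (the decorator placement is an assumption, not shown in this patch):

    from django.test import TestCase
    from django.utils import unittest

    @unittest.skipIf(mockldap is None, "mockldap is not installed")
    class LDAPTest(TestCase):
        pass  # test methods elided in this sketch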
e.g.: - - { - "uid=alice,ou=users,dc=example,dc=com": - { - "uid": ["alice"], - "userPassword": ["secret"], - }, - } - """ - self.directory = self.cidict.cidict(directory) - - self.reset() - - def reset(self): - """ - Resets our recorded API calls and queued return values as well as - miscellaneous configuration options. - """ - self.calls = [] - self.return_value_maps = defaultdict(lambda: {}) - self.async_results = [] - self.options = {} - self.tls_enabled = False - - def set_return_value(self, api_name, arguments, value): - """ - Stores a preset return value for a given API with a given set of - arguments. - """ - self.return_value_maps[api_name][arguments] = value - - def ldap_methods_called_with_arguments(self): - """ - Returns a list of 2-tuples, one for each API call made since the last - reset. Each tuple contains the name of the API and a dictionary of - arguments. Argument defaults are included. - """ - return self.calls - - def ldap_methods_called(self): - """ - Returns the list of API names called. - """ - return [call[0] for call in self.calls] - - # - # Begin LDAP methods - # - - def set_option(self, option, invalue): - self._record_call('set_option', { - 'option': option, - 'invalue': invalue - }) - - self.options[option] = invalue - - def initialize(self, uri, trace_level=0, trace_file=sys.stdout, trace_stack_limit=None): - self._record_call('initialize', { - 'uri': uri, - 'trace_level': trace_level, - 'trace_file': trace_file, - 'trace_stack_limit': trace_stack_limit - }) - - value = self._get_return_value('initialize', - (uri, trace_level, trace_file, trace_stack_limit)) - if value is None: - value = self - - return value - - def simple_bind_s(self, who='', cred=''): - self._record_call('simple_bind_s', { - 'who': who, - 'cred': cred - }) - - value = self._get_return_value('simple_bind_s', (who, cred)) - if value is None: - value = self._simple_bind_s(who, cred) - - return value - - def search(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0): - self._record_call('search', { - 'base': base, - 'scope': scope, - 'filterstr': filterstr, - 'attrlist': attrlist, - 'attrsonly': attrsonly - }) - - value = self._get_return_value('search_s', - (base, scope, filterstr, attrlist, attrsonly)) - if value is None: - value = self._search_s(base, scope, filterstr, attrlist, attrsonly) - - return self._add_async_result(value) - - def result(self, msgid, all=1, timeout=None): - self._record_call('result', { - 'msgid': msgid, - 'all': all, - 'timeout': timeout, - }) - - return self.RES_SEARCH_RESULT, self._pop_async_result(msgid) - - def search_s(self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0): - self._record_call('search_s', { - 'base': base, - 'scope': scope, - 'filterstr': filterstr, - 'attrlist': attrlist, - 'attrsonly': attrsonly - }) - - value = self._get_return_value('search_s', - (base, scope, filterstr, attrlist, attrsonly)) - if value is None: - value = self._search_s(base, scope, filterstr, attrlist, attrsonly) - - return value - - def start_tls_s(self): - self.tls_enabled = True - - def compare_s(self, dn, attr, value): - self._record_call('compare_s', { - 'dn': dn, - 'attr': attr, - 'value': value - }) - - result = self._get_return_value('compare_s', (dn, attr, value)) - if result is None: - result = self._compare_s(dn, attr, value) - - # print "compare_s('%s', '%s', '%s'): %d" % (dn, attr, value, result) - - return result - - # - # Internal implementations - # - - def _simple_bind_s(self, who='', cred=''): - success = False 
- - if(who == '' and cred == ''): - success = True - elif self._compare_s(who.lower(), 'userPassword', cred): - success = True - - if success: - return (97, []) # python-ldap returns this; I don't know what it means - else: - raise self.INVALID_CREDENTIALS('%s:%s' % (who, cred)) - - def _compare_s(self, dn, attr, value): - if dn not in self.directory: - raise self.NO_SUCH_OBJECT - - if attr not in self.directory[dn]: - raise self.NO_SUCH_ATTRIBUTE - - return (value in self.directory[dn][attr]) and 1 or 0 - - def _search_s(self, base, scope, filterstr, attrlist, attrsonly): - """ - We can do a SCOPE_BASE search with the default filter. Beyond that, - you're on your own. - """ - if scope != self.SCOPE_BASE: - raise self.PresetReturnRequiredError('search_s("%s", %d, "%s", "%s", %d)' % - (base, scope, filterstr, attrlist, attrsonly)) - - if filterstr != '(objectClass=*)': - raise self.PresetReturnRequiredError('search_s("%s", %d, "%s", "%s", %d)' % - (base, scope, filterstr, attrlist, attrsonly)) - - attrs = self.directory.get(base) - if attrs is None: - raise self.NO_SUCH_OBJECT() - - return [(base, attrs)] - - def _add_async_result(self, value): - self.async_results.append(value) - - return len(self.async_results) - 1 - - def _pop_async_result(self, msgid): - if msgid in xrange(len(self.async_results)): - value = self.async_results[msgid] - self.async_results[msgid] = None - else: - value = None - - return value - - # - # Utils - # - - def _record_call(self, api_name, arguments): - self.calls.append((api_name, arguments)) - - def _get_return_value(self, api_name, arguments): - try: - value = self.return_value_maps[api_name][arguments] - except KeyError: - value = None - - if isinstance(value, Exception): - raise value - - return value - - class LDAPTest(TestCase): - # Following are the objecgs in our mock LDAP directory + top = ("o=test", {"o": "test"}) + people = ("ou=people,o=test", {"ou": "people"}) + groups = ("ou=groups,o=test", {"ou": "groups"}) + alice = ("uid=alice,ou=people,o=test", { "uid": ["alice"], "objectClass": ["person", "organizationalPerson", "inetOrgPerson", "posixAccount"], @@ -395,6 +107,7 @@ class LDAPTest(TestCase): "cn": ["active_px"], "objectClass": ["posixGroup"], "gidNumber": ["1000"], + "memberUid": [], }) staff_px = ("cn=staff_px,ou=groups,o=test", { "cn": ["staff_px"], @@ -410,6 +123,11 @@ class LDAPTest(TestCase): }) # groupOfUniqueName groups + empty_gon = ("cn=empty_gon,ou=groups,o=test", { + "cn": ["empty_gon"], + "objectClass": ["groupOfNames"], + "member": [] + }) active_gon = ("cn=active_gon,ou=groups,o=test", { "cn": ["active_gon"], "objectClass": ["groupOfNames"], @@ -446,61 +164,66 @@ class LDAPTest(TestCase): "member": ["cn=parent_gon,ou=groups,o=test"] }) - mock_ldap = MockLDAP({ - alice[0]: alice[1], - bob[0]: bob[1], - dressler[0]: dressler[1], - nobody[0]: nobody[1], - active_px[0]: active_px[1], - staff_px[0]: staff_px[1], - superuser_px[0]: superuser_px[1], - active_gon[0]: active_gon[1], - staff_gon[0]: staff_gon[1], - superuser_gon[0]: superuser_gon[1], - parent_gon[0]: parent_gon[1], - nested_gon[0]: nested_gon[1], - circular_gon[0]: circular_gon[1], - }) - - logging_configured = False + directory = dict([top, people, groups, alice, bob, dressler, nobody, + active_px, staff_px, superuser_px, empty_gon, active_gon, + staff_gon, superuser_gon, parent_gon, nested_gon, + circular_gon]) + @classmethod def configure_logger(cls): - if not cls.logging_configured: - logger = logging.getLogger('django_auth_ldap') - formatter = logging.Formatter("LDAP 
auth - %(levelname)s - %(message)s") - handler = logging.StreamHandler() + logger = logging.getLogger('django_auth_ldap') + formatter = logging.Formatter("LDAP auth - %(levelname)s - %(message)s") + handler = logging.StreamHandler() - handler.setLevel(logging.DEBUG) - handler.setFormatter(formatter) - logger.addHandler(handler) + handler.setLevel(logging.DEBUG) + handler.setFormatter(formatter) + logger.addHandler(handler) - logger.setLevel(logging.CRITICAL) + logger.setLevel(logging.CRITICAL) - cls.logging_configured = True - configure_logger = classmethod(configure_logger) + @classmethod + def setUpClass(cls): + cls.configure_logger() + cls.mockldap = mockldap.MockLdap(cls.directory) + + @classmethod + def tearDownClass(cls): + del cls.mockldap def setUp(self): - self.configure_logger() - - self.ldap = _LDAPConfig.ldap = self.mock_ldap + self.mockldap.start() + self.ldapobj = self.mockldap['ldap://localhost'] self.backend = backend.LDAPBackend() - self.backend.ldap # Force global configuration - - self.mock_ldap.reset() + self.backend.ldap # Force global configuration def tearDown(self): - pass + self.mockldap.stop() + del self.ldapobj def test_options(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', CONNECTION_OPTIONS={'opt1': 'value1'} ) + self.backend.authenticate(username='alice', password='password') + + self.assertEqual(self.ldapobj.get_option('opt1'), 'value1') + + def test_callable_server_uri(self): + self._init_settings( + SERVER_URI=lambda: 'ldap://ldap.example.com', + USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test' + ) self.backend.authenticate(username='alice', password='password') - self.assertEqual(self.mock_ldap.options, {'opt1': 'value1'}) + ldapobj = self.mockldap['ldap://ldap.example.com'] + self.assertEqual( + ldapobj.methods_called(with_args=True), + [('initialize', ('ldap://ldap.example.com',), {}), + ('simple_bind_s', ('uid=alice,ou=people,o=test', 'password'), {})] + ) def test_simple_bind(self): self._init_settings( @@ -510,11 +233,13 @@ class LDAPTest(TestCase): user = self.backend.authenticate(username='alice', password='password') - self.assert_(not user.has_usable_password()) + self.assertTrue(not user.has_usable_password()) self.assertEqual(user.username, 'alice') self.assertEqual(User.objects.count(), user_count + 1) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s']) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s'] + ) def test_new_user_lowercase(self): self._init_settings( @@ -524,11 +249,13 @@ class LDAPTest(TestCase): user = self.backend.authenticate(username='Alice', password='password') - self.assert_(not user.has_usable_password()) + self.assertTrue(not user.has_usable_password()) self.assertEqual(user.username, 'alice') self.assertEqual(User.objects.count(), user_count + 1) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s']) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s'] + ) def test_deepcopy(self): self._init_settings( @@ -546,7 +273,7 @@ class LDAPTest(TestCase): user = self.backend.authenticate(username='Alice', password='password') - self.assert_(isinstance(user, TestUser)) + self.assertTrue(isinstance(user, TestUser)) @override_settings(AUTH_USER_MODEL='django_auth_ldap.TestUser') def test_get_custom_user(self): @@ -557,7 +284,7 @@ class LDAPTest(TestCase): user = self.backend.authenticate(username='Alice', password='password') user = 
self.backend.get_user(user.id) - self.assert_(isinstance(user, TestUser)) + self.assertTrue(isinstance(user, TestUser)) def test_new_user_whitespace(self): self._init_settings( @@ -568,7 +295,7 @@ class LDAPTest(TestCase): user = self.backend.authenticate(username=' alice', password='password') user = self.backend.authenticate(username='alice ', password='password') - self.assert_(not user.has_usable_password()) + self.assertTrue(not user.has_usable_password()) self.assertEqual(user.username, 'alice') self.assertEqual(User.objects.count(), user_count + 1) @@ -580,10 +307,12 @@ class LDAPTest(TestCase): user = self.backend.authenticate(username='evil_alice', password='password') - self.assert_(user is None) + self.assertTrue(user is None) self.assertEqual(User.objects.count(), user_count) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s']) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s'] + ) def test_simple_bind_bad_password(self): self._init_settings( @@ -593,10 +322,12 @@ class LDAPTest(TestCase): user = self.backend.authenticate(username='alice', password='bogus') - self.assert_(user is None) + self.assertTrue(user is None) self.assertEqual(User.objects.count(), user_count) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s']) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s'] + ) def test_existing_user(self): self._init_settings( @@ -608,22 +339,23 @@ class LDAPTest(TestCase): user = self.backend.authenticate(username='alice', password='password') # Make sure we only created one user - self.assert_(user is not None) + self.assertTrue(user is not None) self.assertEqual(User.objects.count(), user_count) def test_existing_user_insensitive(self): self._init_settings( USER_SEARCH=LDAPSearch( - "ou=people,o=test", self.mock_ldap.SCOPE_SUBTREE, '(uid=%(user)s)' + "ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)' ) ) - self.mock_ldap.set_return_value('search_s', - ("ou=people,o=test", 2, "(uid=Alice)", None, 0), [self.alice]) + # mockldap doesn't handle case-insensitive matching properly. 
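+        # (For reference: mockldap's seed(...) takes the expected call
+        # arguments and returns a setter; calling that setter with a result
+        # list fixes the return value for exactly that search_s invocation.)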
+ self.ldapobj.search_s.seed('ou=people,o=test', ldap.SCOPE_SUBTREE, + '(uid=Alice)')([self.alice]) User.objects.create(username='alice') user = self.backend.authenticate(username='Alice', password='password') - self.assert_(user is not None) + self.assertTrue(user is not None) self.assertEqual(user.username, 'alice') self.assertEqual(User.objects.count(), 1) @@ -655,99 +387,101 @@ class LDAPTest(TestCase): def test_search_bind(self): self._init_settings( USER_SEARCH=LDAPSearch( - "ou=people,o=test", self.mock_ldap.SCOPE_SUBTREE, '(uid=%(user)s)' + "ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)' ) ) - self.mock_ldap.set_return_value('search_s', - ("ou=people,o=test", 2, "(uid=alice)", None, 0), [self.alice]) user_count = User.objects.count() user = self.backend.authenticate(username='alice', password='password') - self.assert_(user is not None) + self.assertTrue(user is not None) self.assertEqual(User.objects.count(), user_count + 1) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s', 'search_s', 'simple_bind_s']) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s', 'search_s', 'simple_bind_s'] + ) def test_search_bind_no_user(self): self._init_settings( USER_SEARCH=LDAPSearch( - "ou=people,o=test", self.mock_ldap.SCOPE_SUBTREE, '(cn=%(user)s)' + "ou=people,o=test", ldap.SCOPE_SUBTREE, '(cn=%(user)s)' ) ) - self.mock_ldap.set_return_value('search_s', - ("ou=people,o=test", 2, "(cn=alice)", None, 0), []) user = self.backend.authenticate(username='alice', password='password') - self.assert_(user is None) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s', 'search_s']) + self.assertTrue(user is None) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s', 'search_s'] + ) def test_search_bind_multiple_users(self): self._init_settings( USER_SEARCH=LDAPSearch( - "ou=people,o=test", self.mock_ldap.SCOPE_SUBTREE, '(uid=*)' + "ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=*)' ) ) - self.mock_ldap.set_return_value('search_s', - ("ou=people,o=test", 2, "(uid=*)", None, 0), [self.alice, self.bob]) user = self.backend.authenticate(username='alice', password='password') - self.assert_(user is None) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s', 'search_s']) + self.assertTrue(user is None) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s', 'search_s'] + ) def test_search_bind_bad_password(self): self._init_settings( USER_SEARCH=LDAPSearch( - "ou=people,o=test", self.mock_ldap.SCOPE_SUBTREE, '(uid=%(user)s)' + "ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)' ) ) - self.mock_ldap.set_return_value('search_s', - ("ou=people,o=test", 2, "(uid=alice)", None, 0), [self.alice]) user = self.backend.authenticate(username='alice', password='bogus') - self.assert_(user is None) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s', 'search_s', 'simple_bind_s']) + self.assertTrue(user is None) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s', 'search_s', 'simple_bind_s'] + ) def test_search_bind_with_credentials(self): self._init_settings( BIND_DN='uid=bob,ou=people,o=test', BIND_PASSWORD='password', USER_SEARCH=LDAPSearch( - "ou=people,o=test", self.mock_ldap.SCOPE_SUBTREE, '(uid=%(user)s)' + "ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)' ) ) - self.mock_ldap.set_return_value('search_s', - 
("ou=people,o=test", 2, "(uid=alice)", None, 0), [self.alice]) user = self.backend.authenticate(username='alice', password='password') - self.assert_(user is not None) - self.assert_(user.ldap_user is not None) + self.assertTrue(user is not None) + self.assertTrue(user.ldap_user is not None) self.assertEqual(user.ldap_user.dn, self.alice[0]) - self.assertEqual(user.ldap_user.attrs, self.alice[1]) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s', 'search_s', 'simple_bind_s']) + self.assertEqual(user.ldap_user.attrs, ldap.cidict.cidict(self.alice[1])) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s', 'search_s', 'simple_bind_s'] + ) def test_search_bind_with_bad_credentials(self): self._init_settings( BIND_DN='uid=bob,ou=people,o=test', BIND_PASSWORD='bogus', USER_SEARCH=LDAPSearch( - "ou=people,o=test", self.mock_ldap.SCOPE_SUBTREE, '(uid=%(user)s)' + "ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)' ) ) user = self.backend.authenticate(username='alice', password='password') - self.assert_(user is None) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s']) + self.assertTrue(user is None) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s'] + ) def test_unicode_user(self): self._init_settings( @@ -756,8 +490,7 @@ class LDAPTest(TestCase): ) user = self.backend.authenticate(username=u'dreßler', password='password') - - self.assert_(user is not None) + self.assertTrue(user is not None) self.assertEqual(user.username, u'dreßler') self.assertEqual(user.last_name, u'Dreßler') @@ -767,8 +500,7 @@ class LDAPTest(TestCase): ) user = self.backend.authenticate(username="alice", password="password") - - self.assert_(isinstance(user.ldap_user.attrs, self.ldap.cidict.cidict)) + self.assertTrue(isinstance(user.ldap_user.attrs, ldap.cidict.cidict)) def test_populate_user(self): self._init_settings( @@ -783,8 +515,10 @@ class LDAPTest(TestCase): self.assertEqual(user.last_name, 'Adams') # init, bind as user, bind anonymous, lookup user attrs - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s', 'simple_bind_s', 'search_s']) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s', 'simple_bind_s', 'search_s'] + ) def test_bind_as_user(self): self._init_settings( @@ -800,8 +534,10 @@ class LDAPTest(TestCase): self.assertEqual(user.last_name, 'Adams') # init, bind as user, lookup user attrs - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s', 'search_s']) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s', 'search_s'] + ) def test_signal_populate_user(self): self._init_settings( @@ -809,13 +545,13 @@ class LDAPTest(TestCase): ) def handle_populate_user(sender, **kwargs): - self.assert_('user' in kwargs and 'ldap_user' in kwargs) + self.assertTrue('user' in kwargs and 'ldap_user' in kwargs) kwargs['user'].populate_user_handled = True backend.populate_user.connect(handle_populate_user) user = self.backend.authenticate(username='alice', password='password') - self.assert_(user.populate_user_handled) + self.assertTrue(user.populate_user_handled) def test_signal_populate_user_profile(self): settings.AUTH_PROFILE_MODULE = 'django_auth_ldap.TestProfile' @@ -829,7 +565,7 @@ class LDAPTest(TestCase): TestProfile.objects.create(user=kwargs['instance']) def handle_populate_user_profile(sender, **kwargs): - self.assert_('profile' 
in kwargs and 'ldap_user' in kwargs) + self.assertTrue('profile' in kwargs and 'ldap_user' in kwargs) kwargs['profile'].populated = True django.db.models.signals.post_save.connect(handle_user_saved, sender=User) @@ -837,7 +573,7 @@ class LDAPTest(TestCase): user = self.backend.authenticate(username='alice', password='password') - self.assert_(user.get_profile().populated) + self.assertTrue(user.get_profile().populated) def test_no_update_existing(self): self._init_settings( @@ -858,7 +594,7 @@ class LDAPTest(TestCase): def test_require_group(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), REQUIRE_GROUP="cn=active_gon,ou=groups,o=test" ) @@ -866,15 +602,18 @@ class LDAPTest(TestCase): alice = self.backend.authenticate(username='alice', password='password') bob = self.backend.authenticate(username='bob', password='password') - self.assert_(alice is not None) - self.assert_(bob is None) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s', 'simple_bind_s', 'compare_s', 'initialize', 'simple_bind_s', 'simple_bind_s', 'compare_s']) + self.assertTrue(alice is not None) + self.assertTrue(bob is None) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s', 'simple_bind_s', 'compare_s', + 'initialize', 'simple_bind_s', 'simple_bind_s', 'compare_s'] + ) def test_denied_group(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), DENY_GROUP="cn=active_gon,ou=groups,o=test" ) @@ -882,22 +621,20 @@ class LDAPTest(TestCase): alice = self.backend.authenticate(username='alice', password='password') bob = self.backend.authenticate(username='bob', password='password') - self.assert_(alice is None) - self.assert_(bob is not None) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s', 'simple_bind_s', 'compare_s', 'initialize', 'simple_bind_s', 'simple_bind_s', 'compare_s']) + self.assertTrue(alice is None) + self.assertTrue(bob is not None) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s', 'simple_bind_s', 'compare_s', + 'initialize', 'simple_bind_s', 'simple_bind_s', 'compare_s'] + ) def test_group_dns(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(member=uid=alice,ou=people,o=test))", None, 0), - [self.active_gon, self.staff_gon, self.superuser_gon, self.nested_gon] - ) - alice = self.backend.authenticate(username='alice', password='password') self.assertEqual(alice.ldap_user.group_dns, set((g[0].lower() for g in [self.active_gon, self.staff_gon, self.superuser_gon, self.nested_gon]))) @@ -905,14 +642,9 @@ class LDAPTest(TestCase): def test_group_names(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + 
GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(member=uid=alice,ou=people,o=test))", None, 0), - [self.active_gon, self.staff_gon, self.superuser_gon, self.nested_gon] - ) - alice = self.backend.authenticate(username='alice', password='password') self.assertEqual(alice.ldap_user.group_names, set(['active_gon', 'staff_gon', 'superuser_gon', 'nested_gon'])) @@ -920,11 +652,12 @@ class LDAPTest(TestCase): def test_dn_group_membership(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), USER_FLAGS_BY_GROUP={ 'is_active': "cn=active_gon,ou=groups,o=test", - 'is_staff': "cn=staff_gon,ou=groups,o=test", + 'is_staff': ["cn=empty_gon,ou=groups,o=test", + "cn=staff_gon,ou=groups,o=test"], 'is_superuser': "cn=superuser_gon,ou=groups,o=test" } ) @@ -932,17 +665,17 @@ class LDAPTest(TestCase): alice = self.backend.authenticate(username='alice', password='password') bob = self.backend.authenticate(username='bob', password='password') - self.assert_(alice.is_active) - self.assert_(alice.is_staff) - self.assert_(alice.is_superuser) - self.assert_(not bob.is_active) - self.assert_(not bob.is_staff) - self.assert_(not bob.is_superuser) + self.assertTrue(alice.is_active) + self.assertTrue(alice.is_staff) + self.assertTrue(alice.is_superuser) + self.assertTrue(not bob.is_active) + self.assertTrue(not bob.is_staff) + self.assertTrue(not bob.is_superuser) def test_posix_membership(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=PosixGroupType(), USER_FLAGS_BY_GROUP={ 'is_active': "cn=active_px,ou=groups,o=test", @@ -954,57 +687,35 @@ class LDAPTest(TestCase): alice = self.backend.authenticate(username='alice', password='password') bob = self.backend.authenticate(username='bob', password='password') - self.assert_(alice.is_active) - self.assert_(alice.is_staff) - self.assert_(alice.is_superuser) - self.assert_(not bob.is_active) - self.assert_(not bob.is_staff) - self.assert_(not bob.is_superuser) + self.assertTrue(alice.is_active) + self.assertTrue(alice.is_staff) + self.assertTrue(alice.is_superuser) + self.assertTrue(not bob.is_active) + self.assertTrue(not bob.is_staff) + self.assertTrue(not bob.is_superuser) def test_nested_dn_group_membership(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=NestedMemberDNGroupType(member_attr='member'), USER_FLAGS_BY_GROUP={ 'is_active': "cn=parent_gon,ou=groups,o=test", 'is_staff': "cn=parent_gon,ou=groups,o=test", } ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(|(member=uid=alice,ou=people,o=test)))", None, 0), - [self.active_gon, self.nested_gon] - ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(|(member=cn=active_gon,ou=groups,o=test)(member=cn=nested_gon,ou=groups,o=test)))", None, 0), - [self.parent_gon] - ) - 
self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(|(member=cn=parent_gon,ou=groups,o=test)))", None, 0), - [self.circular_gon] - ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(|(member=cn=circular_gon,ou=groups,o=test)))", None, 0), - [self.nested_gon] - ) - - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(|(member=uid=bob,ou=people,o=test)))", None, 0), - [] - ) - alice = self.backend.authenticate(username='alice', password='password') bob = self.backend.authenticate(username='bob', password='password') - self.assert_(alice.is_active) - self.assert_(alice.is_staff) - self.assert_(not bob.is_active) - self.assert_(not bob.is_staff) + self.assertTrue(alice.is_active) + self.assertTrue(alice.is_staff) + self.assertTrue(not bob.is_active) + self.assertTrue(not bob.is_staff) def test_posix_missing_attributes(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=PosixGroupType(), USER_FLAGS_BY_GROUP={ 'is_active': "cn=active_px,ou=groups,o=test" @@ -1013,17 +724,17 @@ class LDAPTest(TestCase): nobody = self.backend.authenticate(username='nobody', password='password') - self.assert_(not nobody.is_active) + self.assertTrue(not nobody.is_active) def test_profile_flags(self): settings.AUTH_PROFILE_MODULE = 'django_auth_ldap.TestProfile' self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), PROFILE_FLAGS_BY_GROUP={ - 'is_special': "cn=superuser_gon,ou=groups,o=test" + 'is_special': ["cn=superuser_gon,ou=groups,o=test"] } ) @@ -1036,78 +747,85 @@ class LDAPTest(TestCase): alice = self.backend.authenticate(username='alice', password='password') bob = self.backend.authenticate(username='bob', password='password') - self.assert_(alice.get_profile().is_special) - self.assert_(not bob.get_profile().is_special) + self.assertTrue(alice.get_profile().is_special) + self.assertTrue(not bob.get_profile().is_special) def test_dn_group_permissions(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), FIND_GROUP_PERMS=True ) self._init_groups() - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(member=uid=alice,ou=people,o=test))", None, 0), - [self.active_gon, self.staff_gon, self.superuser_gon, self.nested_gon] - ) alice = User.objects.create(username='alice') alice = self.backend.get_user(alice.pk) self.assertEqual(self.backend.get_group_permissions(alice), set(["auth.add_user", "auth.change_user"])) self.assertEqual(self.backend.get_all_permissions(alice), set(["auth.add_user", "auth.change_user"])) - self.assert_(self.backend.has_perm(alice, "auth.add_user")) - self.assert_(self.backend.has_module_perms(alice, "auth")) + self.assertTrue(self.backend.has_perm(alice, "auth.add_user")) + self.assertTrue(self.backend.has_module_perms(alice, "auth")) def test_empty_group_permissions(self): self._init_settings( 
USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), FIND_GROUP_PERMS=True ) self._init_groups() - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(member=uid=bob,ou=people,o=test))", None, 0), - [] - ) bob = User.objects.create(username='bob') bob = self.backend.get_user(bob.pk) self.assertEqual(self.backend.get_group_permissions(bob), set()) self.assertEqual(self.backend.get_all_permissions(bob), set()) - self.assert_(not self.backend.has_perm(bob, "auth.add_user")) - self.assert_(not self.backend.has_module_perms(bob, "auth")) + self.assertTrue(not self.backend.has_perm(bob, "auth.add_user")) + self.assertTrue(not self.backend.has_module_perms(bob, "auth")) def test_posix_group_permissions(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', - self.mock_ldap.SCOPE_SUBTREE, "(objectClass=posixGroup)" - ), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE, + '(objectClass=posixGroup)'), GROUP_TYPE=PosixGroupType(), FIND_GROUP_PERMS=True ) self._init_groups() - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=posixGroup)(|(gidNumber=1000)(memberUid=alice)))", None, 0), - [self.active_px, self.staff_px, self.superuser_px] - ) alice = User.objects.create(username='alice') alice = self.backend.get_user(alice.pk) self.assertEqual(self.backend.get_group_permissions(alice), set(["auth.add_user", "auth.change_user"])) self.assertEqual(self.backend.get_all_permissions(alice), set(["auth.add_user", "auth.change_user"])) - self.assert_(self.backend.has_perm(alice, "auth.add_user")) - self.assert_(self.backend.has_module_perms(alice, "auth")) + self.assertTrue(self.backend.has_perm(alice, "auth.add_user")) + self.assertTrue(self.backend.has_module_perms(alice, "auth")) + + def test_posix_group_permissions_no_gid(self): + self._init_settings( + USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE, + '(objectClass=posixGroup)'), + GROUP_TYPE=PosixGroupType(), + FIND_GROUP_PERMS=True + ) + self._init_groups() + self.ldapobj.modify_s(self.alice[0], [(ldap.MOD_DELETE, 'gidNumber', None)]) + self.ldapobj.modify_s(self.active_px[0], [(ldap.MOD_ADD, 'memberUid', ['alice'])]) + + alice = User.objects.create(username='alice') + alice = self.backend.get_user(alice.pk) + + self.assertEqual(self.backend.get_group_permissions(alice), set(["auth.add_user", "auth.change_user"])) + self.assertEqual(self.backend.get_all_permissions(alice), set(["auth.add_user", "auth.change_user"])) + self.assertTrue(self.backend.has_perm(alice, "auth.add_user")) + self.assertTrue(self.backend.has_module_perms(alice, "auth")) def test_foreign_user_permissions(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), FIND_GROUP_PERMS=True ) @@ -1120,20 +838,12 @@ class LDAPTest(TestCase): def test_group_cache(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', 
ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), FIND_GROUP_PERMS=True, CACHE_GROUPS=True ) self._init_groups() - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(member=uid=alice,ou=people,o=test))", None, 0), - [self.active_gon, self.staff_gon, self.superuser_gon, self.nested_gon] - ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(member=uid=bob,ou=people,o=test))", None, 0), - [] - ) alice_id = User.objects.create(username='alice').pk bob_id = User.objects.create(username='bob').pk @@ -1141,29 +851,29 @@ class LDAPTest(TestCase): # Check permissions twice for each user for i in range(2): alice = self.backend.get_user(alice_id) - self.assertEqual(self.backend.get_group_permissions(alice), - set(["auth.add_user", "auth.change_user"])) + self.assertEqual( + self.backend.get_group_permissions(alice), + set(["auth.add_user", "auth.change_user"]) + ) bob = self.backend.get_user(bob_id) self.assertEqual(self.backend.get_group_permissions(bob), set()) # Should have executed one LDAP search per user - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s', 'search_s', 'initialize', 'simple_bind_s', 'search_s']) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s', 'search_s', + 'initialize', 'simple_bind_s', 'search_s'] + ) def test_group_mirroring(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', - self.mock_ldap.SCOPE_SUBTREE, "(objectClass=posixGroup)" - ), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE, + '(objectClass=posixGroup)'), GROUP_TYPE=PosixGroupType(), MIRROR_GROUPS=True, ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=posixGroup)(|(gidNumber=1000)(memberUid=alice)))", None, 0), - [self.active_px, self.staff_px, self.superuser_px] - ) self.assertEqual(Group.objects.count(), 0) @@ -1175,47 +885,30 @@ class LDAPTest(TestCase): def test_nested_group_mirroring(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE, + '(objectClass=groupOfNames)'), GROUP_TYPE=NestedMemberDNGroupType(member_attr='member'), MIRROR_GROUPS=True, ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(|(member=uid=alice,ou=people,o=test)))", None, 0), - [self.active_gon, self.nested_gon] - ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(|(member=cn=active_gon,ou=groups,o=test)(member=cn=nested_gon,ou=groups,o=test)))", None, 0), - [self.parent_gon] - ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(|(member=cn=parent_gon,ou=groups,o=test)))", None, 0), - [self.circular_gon] - ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(|(member=cn=circular_gon,ou=groups,o=test)))", None, 0), - [self.nested_gon] - ) alice = self.backend.authenticate(username='alice', password='password') - self.assertEqual(Group.objects.count(), 4) - self.assertEqual(set(Group.objects.all().values_list('name', flat=True)), - set(['active_gon', 'nested_gon', 'parent_gon', 'circular_gon'])) + self.assertEqual( + set(Group.objects.all().values_list('name', flat=True)), + set(['active_gon', 'staff_gon', 
'superuser_gon', 'nested_gon', + 'parent_gon', 'circular_gon']) + ) self.assertEqual(set(alice.groups.all()), set(Group.objects.all())) def test_authorize_external_users(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), FIND_GROUP_PERMS=True, AUTHORIZE_ALL_USERS=True ) self._init_groups() - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(member=uid=alice,ou=people,o=test))", None, 0), - [self.active_gon, self.staff_gon, self.superuser_gon, self.nested_gon] - ) alice = User.objects.create(username='alice') @@ -1229,25 +922,25 @@ class LDAPTest(TestCase): alice = self.backend.populate_user('alice') bob = self.backend.populate_user('bob') - self.assert_(alice is not None) + self.assertTrue(alice is not None) self.assertEqual(alice.first_name, u"") self.assertEqual(alice.last_name, u"") - self.assert_(alice.is_active) - self.assert_(not alice.is_staff) - self.assert_(not alice.is_superuser) - self.assert_(bob is not None) + self.assertTrue(alice.is_active) + self.assertTrue(not alice.is_staff) + self.assertTrue(not alice.is_superuser) + self.assertTrue(bob is not None) self.assertEqual(bob.first_name, u"") self.assertEqual(bob.last_name, u"") - self.assert_(bob.is_active) - self.assert_(not bob.is_staff) - self.assert_(not bob.is_superuser) + self.assertTrue(bob.is_active) + self.assertTrue(not bob.is_staff) + self.assertTrue(not bob.is_superuser) def test_populate_without_auth(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', ALWAYS_UPDATE_USER=False, USER_ATTR_MAP={'first_name': 'givenName', 'last_name': 'sn'}, - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=GroupOfNamesType(), USER_FLAGS_BY_GROUP={ 'is_active': "cn=active_gon,ou=groups,o=test", @@ -1262,18 +955,18 @@ class LDAPTest(TestCase): alice = self.backend.populate_user('alice') bob = self.backend.populate_user('bob') - self.assert_(alice is not None) + self.assertTrue(alice is not None) self.assertEqual(alice.first_name, u"Alice") self.assertEqual(alice.last_name, u"Adams") - self.assert_(alice.is_active) - self.assert_(alice.is_staff) - self.assert_(alice.is_superuser) - self.assert_(bob is not None) + self.assertTrue(alice.is_active) + self.assertTrue(alice.is_staff) + self.assertTrue(alice.is_superuser) + self.assertTrue(bob is not None) self.assertEqual(bob.first_name, u"Robert") self.assertEqual(bob.last_name, u"Barker") - self.assert_(not bob.is_active) - self.assert_(not bob.is_staff) - self.assert_(not bob.is_superuser) + self.assertTrue(not bob.is_active) + self.assertTrue(not bob.is_staff) + self.assertTrue(not bob.is_superuser) def test_populate_bogus_user(self): self._init_settings( @@ -1290,9 +983,9 @@ class LDAPTest(TestCase): START_TLS=False, ) - self.assert_(not self.mock_ldap.tls_enabled) + self.assertTrue(not self.ldapobj.tls_enabled) self.backend.authenticate(username='alice', password='password') - self.assert_(not self.mock_ldap.tls_enabled) + self.assertTrue(not self.ldapobj.tls_enabled) def test_start_tls(self): self._init_settings( @@ -1300,9 +993,9 @@ class LDAPTest(TestCase): START_TLS=True, ) - self.assert_(not self.mock_ldap.tls_enabled) + self.assertTrue(not self.ldapobj.tls_enabled) 
self.backend.authenticate(username='alice', password='password') - self.assert_(self.mock_ldap.tls_enabled) + self.assertTrue(self.ldapobj.tls_enabled) def test_null_search_results(self): """ @@ -1310,33 +1003,27 @@ class LDAPTest(TestCase): """ self._init_settings( USER_SEARCH=LDAPSearch( - "ou=people,o=test", self.mock_ldap.SCOPE_SUBTREE, '(uid=%(user)s)' + "ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)' ) ) - self.mock_ldap.set_return_value('search_s', - ("ou=people,o=test", 2, "(uid=alice)", None, 0), [self.alice, (None, '')]) - self.backend.authenticate(username='alice', password='password') def test_union_search(self): self._init_settings( USER_SEARCH=LDAPSearchUnion( - LDAPSearch("ou=groups,o=test", self.mock_ldap.SCOPE_SUBTREE, '(uid=%(user)s)'), - LDAPSearch("ou=people,o=test", self.mock_ldap.SCOPE_SUBTREE, '(uid=%(user)s)'), + LDAPSearch("ou=groups,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)'), + LDAPSearch("ou=people,o=test", ldap.SCOPE_SUBTREE, '(uid=%(user)s)'), ) ) - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(uid=alice)", None, 0), []) - self.mock_ldap.set_return_value('search_s', - ("ou=people,o=test", 2, "(uid=alice)", None, 0), [self.alice]) - alice = self.backend.authenticate(username='alice', password='password') - self.assert_(alice is not None) + self.assertTrue(alice is not None) - self.assertEqual(self.mock_ldap.ldap_methods_called(), + self.assertEqual( + self.ldapobj.methods_called(), ['initialize', 'simple_bind_s', 'search', 'search', 'result', - 'result', 'simple_bind_s']) + 'result', 'simple_bind_s'] + ) def test_deny_empty_password(self): self._init_settings( @@ -1346,7 +1033,7 @@ class LDAPTest(TestCase): alice = self.backend.authenticate(username=u'alice', password=u'') self.assertEqual(alice, None) - self.assertEqual(self.mock_ldap.ldap_methods_called(), []) + self.assertEqual(self.ldapobj.methods_called(), []) def test_permit_empty_password(self): self._init_settings( @@ -1357,21 +1044,19 @@ class LDAPTest(TestCase): alice = self.backend.authenticate(username=u'alice', password=u'') self.assertEqual(alice, None) - self.assertEqual(self.mock_ldap.ldap_methods_called(), - ['initialize', 'simple_bind_s']) + self.assertEqual( + self.ldapobj.methods_called(), + ['initialize', 'simple_bind_s'] + ) def test_pickle(self): self._init_settings( USER_DN_TEMPLATE='uid=%(user)s,ou=people,o=test', - GROUP_SEARCH=LDAPSearch('ou=groups,o=test', self.mock_ldap.SCOPE_SUBTREE), + GROUP_SEARCH=LDAPSearch('ou=groups,o=test', ldap.SCOPE_SUBTREE), GROUP_TYPE=MemberDNGroupType(member_attr='member'), FIND_GROUP_PERMS=True ) self._init_groups() - self.mock_ldap.set_return_value('search_s', - ("ou=groups,o=test", 2, "(&(objectClass=*)(member=uid=alice,ou=people,o=test))", None, 0), - [self.active_gon, self.staff_gon, self.superuser_gon, self.nested_gon] - ) alice0 = self.backend.authenticate(username=u'alice', password=u'password') @@ -1379,11 +1064,11 @@ class LDAPTest(TestCase): alice = pickle.loads(pickled) alice.ldap_user.backend.settings = alice0.ldap_user.backend.settings - self.assert_(alice is not None) + self.assertTrue(alice is not None) self.assertEqual(self.backend.get_group_permissions(alice), set(["auth.add_user", "auth.change_user"])) self.assertEqual(self.backend.get_all_permissions(alice), set(["auth.add_user", "auth.change_user"])) - self.assert_(self.backend.has_perm(alice, "auth.add_user")) - self.assert_(self.backend.has_module_perms(alice, "auth")) + self.assertTrue(self.backend.has_perm(alice, "auth.add_user")) + 
self.assertTrue(self.backend.has_module_perms(alice, "auth")) def _init_settings(self, **kwargs): self.backend.settings = TestSettings(**kwargs) @@ -1399,3 +1084,7 @@ class LDAPTest(TestCase): active_px = Group.objects.create(name='active_px') active_px.permissions.add(*permissions) + + +# Python 2.5-compatible class decoration +LDAPTest = unittest.skipIf(mockldap is None, "django_auth_ldap tests require the mockldap package.")(LDAPTest) diff --git a/awx/lib/site-packages/django_extensions/__init__.py b/awx/lib/site-packages/django_extensions/__init__.py index 9c5891c6bf..ec2dcb1cb0 100644 --- a/awx/lib/site-packages/django_extensions/__init__.py +++ b/awx/lib/site-packages/django_extensions/__init__.py @@ -1,5 +1,5 @@ -VERSION = (1, 2, 2) +VERSION = (1, 2, 5) # Dynamically calculate the version based on VERSION tuple if len(VERSION) > 2 and VERSION[2] is not None: diff --git a/awx/lib/site-packages/django_extensions/admin/__init__.py b/awx/lib/site-packages/django_extensions/admin/__init__.py index 7564ea65e3..51a50d3303 100644 --- a/awx/lib/site-packages/django_extensions/admin/__init__.py +++ b/awx/lib/site-packages/django_extensions/admin/__init__.py @@ -109,7 +109,7 @@ class ForeignKeyAutocompleteAdmin(ModelAdmin): other_qs.dup_select_related(queryset) other_qs = other_qs.filter(reduce(operator.or_, or_queries)) queryset = queryset & other_qs - data = ''.join([six.u('%s|%s\n' % (to_string_function(f), f.pk)) for f in queryset]) + data = ''.join([six.u('%s|%s\n') % (to_string_function(f), f.pk) for f in queryset]) elif object_pk: try: obj = queryset.get(pk=object_pk) diff --git a/awx/lib/site-packages/django_extensions/admin/widgets.py b/awx/lib/site-packages/django_extensions/admin/widgets.py index 1f29d0fe78..07fa22e238 100644 --- a/awx/lib/site-packages/django_extensions/admin/widgets.py +++ b/awx/lib/site-packages/django_extensions/admin/widgets.py @@ -26,8 +26,6 @@ class ForeignKeySearchInput(ForeignKeyRawIdWidget): js_files = ['django_extensions/js/jquery.bgiframe.min.js', 'django_extensions/js/jquery.ajaxQueue.js', 'django_extensions/js/jquery.autocomplete.js'] - if django.get_version() < "1.3": - js_files.append('django_extensions/js/jquery.js') return forms.Media(css={'all': ('django_extensions/css/jquery.autocomplete.css',)}, js=js_files) diff --git a/awx/lib/site-packages/django_extensions/db/fields/__init__.py b/awx/lib/site-packages/django_extensions/db/fields/__init__.py index 337ddd2c27..0639f170c5 100644 --- a/awx/lib/site-packages/django_extensions/db/fields/__init__.py +++ b/awx/lib/site-packages/django_extensions/db/fields/__init__.py @@ -285,3 +285,8 @@ class UUIDField(CharField): args, kwargs = introspector(self) # That's our definition! return (field_class, args, kwargs) + + +class PostgreSQLUUIDField(UUIDField): + def db_type(self, connection=None): + return "UUID" diff --git a/awx/lib/site-packages/django_extensions/db/fields/json.py b/awx/lib/site-packages/django_extensions/db/fields/json.py index 51d5b1dd52..ef1705914f 100644 --- a/awx/lib/site-packages/django_extensions/db/fields/json.py +++ b/awx/lib/site-packages/django_extensions/db/fields/json.py @@ -11,29 +11,25 @@ more information. 
""" import six -import datetime from decimal import Decimal from django.db import models from django.conf import settings -from django.utils import simplejson +from django.core.serializers.json import DjangoJSONEncoder - -class JSONEncoder(simplejson.JSONEncoder): - def default(self, obj): - if isinstance(obj, Decimal): - return str(obj) - elif isinstance(obj, datetime.datetime): - assert settings.TIME_ZONE == 'UTC' - return obj.strftime('%Y-%m-%dT%H:%M:%SZ') - return simplejson.JSONEncoder.default(self, obj) +try: + # Django <= 1.6 backwards compatibility + from django.utils import simplejson as json +except ImportError: + # Django >= 1.7 + import json def dumps(value): - return JSONEncoder().encode(value) + return DjangoJSONEncoder().encode(value) def loads(txt): - value = simplejson.loads( + value = json.loads( txt, parse_float=Decimal, encoding=settings.DEFAULT_CHARSET diff --git a/awx/lib/site-packages/django_extensions/jobs/minutely/__init__.py b/awx/lib/site-packages/django_extensions/jobs/minutely/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/django_extensions/management/color.py b/awx/lib/site-packages/django_extensions/management/color.py index 99fead3517..ccd84069d0 100644 --- a/awx/lib/site-packages/django_extensions/management/color.py +++ b/awx/lib/site-packages/django_extensions/management/color.py @@ -9,6 +9,9 @@ from django.utils import termcolors def color_style(): style = color.color_style() if color.supports_color(): + style.INFO = termcolors.make_style(fg='green') + style.WARN = termcolors.make_style(fg='yellow') + style.BOLD = termcolors.make_style(opts=('bold',)) style.URL = termcolors.make_style(fg='green', opts=('bold',)) style.MODULE = termcolors.make_style(fg='yellow') style.MODULE_NAME = termcolors.make_style(opts=('bold',)) diff --git a/awx/lib/site-packages/django_extensions/management/commands/clean_pyc.py b/awx/lib/site-packages/django_extensions/management/commands/clean_pyc.py index f9625da218..b409ddaed8 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/clean_pyc.py +++ b/awx/lib/site-packages/django_extensions/management/commands/clean_pyc.py @@ -35,10 +35,3 @@ class Command(NoArgsCommand): print(full_path) os.remove(full_path) -# Backwards compatibility for Django r9110 -if not [opt for opt in Command.option_list if opt.dest == 'verbosity']: - Command.option_list += ( - make_option('--verbosity', '-v', action="store", dest="verbosity", - default='1', type='choice', choices=['0', '1', '2'], - help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"), - ) diff --git a/awx/lib/site-packages/django_extensions/management/commands/compile_pyc.py b/awx/lib/site-packages/django_extensions/management/commands/compile_pyc.py index 2d75772932..f978d3e37b 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/compile_pyc.py +++ b/awx/lib/site-packages/django_extensions/management/commands/compile_pyc.py @@ -28,11 +28,3 @@ class Command(NoArgsCommand): if verbose: print("%sc" % full_path) py_compile.compile(full_path) - -# Backwards compatibility for Django r9110 -if not [opt for opt in Command.option_list if opt.dest == 'verbosity']: - Command.option_list += ( - make_option('--verbosity', '-v', action="store", dest="verbosity", - default='1', type='choice', choices=['0', '1', '2'], - help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"), - ) diff --git a/awx/lib/site-packages/django_extensions/management/commands/graph_models.py 
b/awx/lib/site-packages/django_extensions/management/commands/graph_models.py index 59403e248c..00942d0803 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/graph_models.py +++ b/awx/lib/site-packages/django_extensions/management/commands/graph_models.py @@ -1,10 +1,29 @@ +import sys +from optparse import make_option, NO_DEFAULT from django.core.management.base import BaseCommand, CommandError -from optparse import make_option +from django.conf import settings from django_extensions.management.modelviz import generate_dot +try: + import pygraphviz + HAS_PYGRAPHVIZ = True +except ImportError: + HAS_PYGRAPHVIZ = False + +try: + import pydot + HAS_PYDOT = True +except ImportError: + HAS_PYDOT = False + + class Command(BaseCommand): - option_list = BaseCommand.option_list + ( + graph_models_options = ( + make_option('--pygraphviz', action='store_true', dest='pygraphviz', + help='Use PyGraphViz to generate the image.'), + make_option('--pydot', action='store_true', dest='pydot', + help='Use PyDot to generate the image.'), make_option('--disable-fields', '-d', action='store_true', dest='disable_fields', help='Do not show the class member fields'), make_option('--group-models', '-g', action='store_true', dest='group_models', @@ -23,11 +42,18 @@ class Command(BaseCommand): help='Exclude specific column(s) from the graph. Can also load exclude list from file.'), make_option('--exclude-models', '-X', action='store', dest='exclude_models', help='Exclude specific model(s) from the graph. Can also load exclude list from file.'), - make_option('--inheritance', '-e', action='store_true', dest='inheritance', + make_option('--inheritance', '-e', action='store_true', dest='inheritance', default=True, + help='Include inheritance arrows (default)'), + make_option('--no-inheritance', '-E', action='store_false', dest='inheritance', help='Include inheritance arrows'), + make_option('--hide-relations-from-fields', '-R', action='store_false', dest="relations_as_fields", + default=True, help="Do not show relations as fields in the graph."), + make_option('--disable-sort-fields', '-S', action="store_false", dest="sort_fields", + default=True, help="Do not sort fields"), ) + option_list = BaseCommand.option_list + graph_models_options - help = ("Creates a GraphViz dot file for the specified app names. You can pass multiple app names and they will all be combined into a single model. Output is usually directed to a dot file.") + help = "Creates a GraphViz dot file for the specified app names. You can pass multiple app names and they will all be combined into a single model. Output is usually directed to a dot file." 
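+
+    # The options above can also be given project-wide defaults through a
+    # GRAPH_MODELS dict in settings.py, picked up by the options_from_settings()
+    # hook added below; an illustrative sketch (keys mirror the long option
+    # names, with dashes replaced by underscores):
+    #
+    #     GRAPH_MODELS = {
+    #         'all_applications': True,
+    #         'group_models': True,
+    #     }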
args = "[appname]" label = 'application name' @@ -35,37 +61,83 @@ class Command(BaseCommand): can_import_settings = True def handle(self, *args, **options): + self.options_from_settings(options) + if len(args) < 1 and not options['all_applications']: raise CommandError("need one or more arguments for appname") - dotdata = generate_dot(args, **options) + use_pygraphviz = options.get('pygraphviz', False) + use_pydot = options.get('pydot', False) + cli_options = ' '.join(sys.argv[2:]) + dotdata = generate_dot(args, cli_options=cli_options, **options) + dotdata = dotdata.encode('utf-8') if options['outputfile']: - self.render_output(dotdata, **options) + if not use_pygraphviz and not use_pydot: + if HAS_PYGRAPHVIZ: + use_pygraphviz = True + elif HAS_PYDOT: + use_pydot = True + if use_pygraphviz: + self.render_output_pygraphviz(dotdata, **options) + elif use_pydot: + self.render_output_pydot(dotdata, **options) + else: + raise CommandError("Neither pygraphviz nor pydot could be found to generate the image") else: self.print_output(dotdata) - def print_output(self, dotdata): - print(dotdata.encode('utf-8')) + def options_from_settings(self, options): + defaults = getattr(settings, 'GRAPH_MODELS', None) + if defaults: + for option in self.graph_models_options: + long_opt = option._long_opts[0] + if long_opt: + long_opt = long_opt.lstrip("-").replace("-", "_") + if long_opt in defaults: + default_value = None + if not option.default == NO_DEFAULT: + default_value = option.default + if options[option.dest] == default_value: + options[option.dest] = defaults[long_opt] - def render_output(self, dotdata, **kwargs): - try: - import pygraphviz - except ImportError: + def print_output(self, dotdata): + print(dotdata) + + def render_output_pygraphviz(self, dotdata, **kwargs): + """Renders the image using pygraphviz""" + if not HAS_PYGRAPHVIZ: raise CommandError("You need to install pygraphviz python module") - vizdata = ' '.join(dotdata.split("\n")).strip().encode('utf-8') version = pygraphviz.__version__.rstrip("-svn") try: if tuple(int(v) for v in version.split('.')) < (0, 36): # HACK around old/broken AGraph before version 0.36 (ubuntu ships with this old version) import tempfile tmpfile = tempfile.NamedTemporaryFile() - tmpfile.write(vizdata) + tmpfile.write(dotdata) tmpfile.seek(0) - vizdata = tmpfile.name + dotdata = tmpfile.name except ValueError: pass - graph = pygraphviz.AGraph(vizdata) + graph = pygraphviz.AGraph(dotdata) graph.layout(prog=kwargs['layout']) graph.draw(kwargs['outputfile']) + + def render_output_pydot(self, dotdata, **kwargs): + """Renders the image using pydot""" + if not HAS_PYDOT: + raise CommandError("You need to install pydot python module") + + graph = pydot.graph_from_dot_data(dotdata) + if not graph: + raise CommandError("pydot returned an error") + output_file = kwargs['outputfile'] + formats = ['bmp', 'canon', 'cmap', 'cmapx', 'cmapx_np', 'dot', 'dia', 'emf', + 'em', 'fplus', 'eps', 'fig', 'gd', 'gd2', 'gif', 'gv', 'imap', + 'imap_np', 'ismap', 'jpe', 'jpeg', 'jpg', 'metafile', 'pdf', + 'pic', 'plain', 'plain-ext', 'png', 'pov', 'ps', 'ps2', 'svg', + 'svgz', 'tif', 'tiff', 'tk', 'vml', 'vmlz', 'vrml', 'wbmp', 'xdot'] + ext = output_file[output_file.rfind('.') + 1:] + format = ext if ext in formats else 'raw' + graph.write(output_file, format=format) diff --git a/awx/lib/site-packages/django_extensions/management/commands/pipchecker.py b/awx/lib/site-packages/django_extensions/management/commands/pipchecker.py index 0cc7b3e27d..4fff74edae 100644 --- 
a/awx/lib/site-packages/django_extensions/management/commands/pipchecker.py +++ b/awx/lib/site-packages/django_extensions/management/commands/pipchecker.py @@ -7,18 +7,16 @@ import urllib2 import urlparse import xmlrpclib from distutils.version import LooseVersion +from django.core.management.base import NoArgsCommand +from django_extensions.management.color import color_style +from optparse import make_option +from pip.req import parse_requirements try: import requests + HAS_REQUESTS = True except ImportError: - print("""The requests library is not installed. To continue: - pip install requests""") - -from optparse import make_option - -from django.core.management.base import NoArgsCommand - -from pip.req import parse_requirements + HAS_REQUESTS = False class Command(NoArgsCommand): @@ -41,6 +39,8 @@ class Command(NoArgsCommand): help = "Scan pip requirement files for out-of-date packages." def handle_noargs(self, **options): + self.style = color_style() + self.options = options if options["requirements"]: req_files = options["requirements"] @@ -74,7 +74,10 @@ class Command(NoArgsCommand): self.github_api_token = None # only 50 requests per hour self.check_pypi() - self.check_github() + if HAS_REQUESTS: + self.check_github() + else: + print(self.style.ERROR("Cannot check github urls. The requests library is not installed. ( pip install requests )")) self.check_other() def _urlopen_as_json(self, url, headers=None): @@ -105,16 +108,16 @@ class Command(NoArgsCommand): available_version = None if not available_version: - msg = "release is not on pypi (check capitalization and/or --extra-index-url)" + msg = self.style.WARN("release is not on pypi (check capitalization and/or --extra-index-url)") elif self.options['show_newer'] and dist_version > available_version: - msg = "{0} available (newer installed)".format(available_version) + msg = self.style.INFO("{0} available (newer installed)".format(available_version)) elif available_version > dist_version: - msg = "{0} available".format(available_version) + msg = self.style.INFO("{0} available".format(available_version)) else: msg = "up to date" del self.reqs[name] continue - pkg_info = "{dist.project_name} {dist.version}".format(dist=dist) + pkg_info = self.style.BOLD("{dist.project_name} {dist.version}".format(dist=dist)) else: msg = "not installed" pkg_info = name @@ -164,8 +167,12 @@ class Command(NoArgsCommand): """ for name, req in self.reqs.items(): req_url = req["url"] + if not req_url: + continue if req_url.startswith("git") and "github.com/" not in req_url: continue + if req_url.endswith(".tar.gz") or req_url.endswith(".tar.bz2") or req_url.endswith(".zip"): + continue headers = { "content-type": "application/json", @@ -175,7 +182,7 @@ class Command(NoArgsCommand): try: user, repo = urlparse.urlparse(req_url).path.split("#")[0].strip("/").rstrip("/").split("/") except (ValueError, IndexError) as e: - print("\nFailed to parse %r: %s\n" % (req_url, e)) + print(self.style.ERROR("\nFailed to parse %r: %s\n" % (req_url, e))) continue try: @@ -186,21 +193,22 @@ class Command(NoArgsCommand): return if "message" in test_auth and test_auth["message"] == "Bad credentials": - print("\nGithub API: Bad credentials. Aborting!\n") + print(self.style.ERROR("\nGithub API: Bad credentials. Aborting!\n")) return elif "message" in test_auth and test_auth["message"].startswith("API Rate Limit Exceeded"): - print("\nGithub API: Rate Limit Exceeded. Aborting!\n") + print(self.style.ERROR("\nGithub API: Rate Limit Exceeded. 
Aborting!\n")) return + frozen_commit_sha = None if ".git" in repo: repo_name, frozen_commit_full = repo.split(".git") if frozen_commit_full.startswith("@"): frozen_commit_sha = frozen_commit_full[1:] elif "@" in repo: repo_name, frozen_commit_sha = repo.split("@") - else: - frozen_commit_sha = None - msg = "repo is not frozen" + + if frozen_commit_sha is None: + msg = self.style.ERROR("repo is not frozen") if frozen_commit_sha: branch_url = "https://api.github.com/repos/{0}/{1}/branches".format(user, repo_name) @@ -214,14 +222,16 @@ class Command(NoArgsCommand): frozen_commit_data = requests.get(frozen_commit_url, headers=headers).json() if "message" in frozen_commit_data and frozen_commit_data["message"] == "Not Found": - msg = "{0} not found in {1}. Repo may be private.".format(frozen_commit_sha[:10], name) + msg = self.style.ERROR("{0} not found in {1}. Repo may be private.".format(frozen_commit_sha[:10], name)) elif frozen_commit_sha in [branch["commit"]["sha"] for branch in branch_data]: - msg = "up to date" + msg = self.style.BOLD("up to date") else: - msg = "{0} is not the head of any branch".format(frozen_commit_data["sha"][:10]) + msg = self.style.INFO("{0} is not the head of any branch".format(frozen_commit_data["sha"][:10])) if "dist" in req: pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"]) + elif frozen_commit_sha is None: + pkg_info = name else: pkg_info = "{0} {1}".format(name, frozen_commit_sha[:10]) print("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg)) @@ -235,7 +245,7 @@ class Command(NoArgsCommand): support here. """ if self.reqs: - print("\nOnly pypi and github based requirements are supported:") + print(self.style.ERROR("\nOnly pypi and github based requirements are supported:")) for name, req in self.reqs.items(): if "dist" in req: pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"]) @@ -243,4 +253,4 @@ class Command(NoArgsCommand): pkg_info = "{url}".format(url=req["url"]) else: pkg_info = "unknown package" - print("{pkg_info:40} is not a pypi or github requirement".format(pkg_info=pkg_info)) + print(self.style.BOLD("{pkg_info:40} is not a pypi or github requirement".format(pkg_info=pkg_info))) diff --git a/awx/lib/site-packages/django_extensions/management/commands/print_user_for_session.py b/awx/lib/site-packages/django_extensions/management/commands/print_user_for_session.py index 000453aa81..d61059fcc2 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/print_user_for_session.py +++ b/awx/lib/site-packages/django_extensions/management/commands/print_user_for_session.py @@ -1,12 +1,16 @@ +from importlib import import_module +from django.conf import settings from django.core.management.base import BaseCommand, CommandError + try: from django.contrib.auth import get_user_model # Django 1.5 except ImportError: from django_extensions.future_1_5 import get_user_model -from django.contrib.sessions.models import Session -import re -SESSION_RE = re.compile("^[0-9a-f]{20,40}$") +try: + from django.contrib.sessions.backends.base import VALID_KEY_CHARS # Django 1.5 +except ImportError: + VALID_KEY_CHARS = "abcdef0123456789" class Command(BaseCommand): @@ -22,30 +26,41 @@ class Command(BaseCommand): def handle(self, *args, **options): if len(args) > 1: raise CommandError("extra arguments supplied") + if len(args) < 1: raise CommandError("session_key argument missing") + key = args[0].lower() - if not SESSION_RE.match(key): + + if not set(key).issubset(set(VALID_KEY_CHARS)): raise 
CommandError("malformed session key") - try: - session = Session.objects.get(pk=key) - except Session.DoesNotExist: + + engine = import_module(settings.SESSION_ENGINE) + + if not engine.SessionStore().exists(key): print("Session Key does not exist. Expired?") return - data = session.get_decoded() - print('Session to Expire: %s' % session.expire_date) + session = engine.SessionStore(key) + data = session.load() + + print('Session to Expire: %s' % session.get_expiry_date()) print('Raw Data: %s' % data) + uid = data.get('_auth_user_id', None) + if uid is None: print('No user associated with session') return + print("User id: %s" % uid) + User = get_user_model() try: user = User.objects.get(pk=uid) except User.DoesNotExist: print("No user associated with that id.") return + for key in ['username', 'email', 'first_name', 'last_name']: print("%s: %s" % (key, getattr(user, key))) diff --git a/awx/lib/site-packages/django_extensions/management/commands/reset_db.py b/awx/lib/site-packages/django_extensions/management/commands/reset_db.py index e3f3fad3b6..a94ef77665 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/reset_db.py +++ b/awx/lib/site-packages/django_extensions/management/commands/reset_db.py @@ -1,14 +1,12 @@ """ originally from http://www.djangosnippets.org/snippets/828/ by dnordberg """ +import logging +from optparse import make_option -from six.moves import input from django.conf import settings from django.core.management.base import CommandError, BaseCommand -import django -import logging -import re -from optparse import make_option +from six.moves import input class Command(BaseCommand): @@ -27,32 +25,13 @@ class Command(BaseCommand): help='Use another password for the database then defined in settings.py'), make_option('-D', '--dbname', action='store', dest='dbname', default=None, - help='Use another database name then defined in settings.py (For PostgreSQL this defaults to "template1")'), + help='Use another database name then defined in settings.py'), make_option('-R', '--router', action='store', - dest='router', default=None, + dest='router', default='default', help='Use this router-database other then defined in settings.py'), ) help = "Resets the database for this project." - def set_db_settings(self, *args, **options): - if django.get_version() >= "1.2": - router = options.get('router') - if router is None: - return False - - # retrieve this with the 'using' argument - dbinfo = settings.DATABASES.get(router) - settings.DATABASE_ENGINE = dbinfo.get('ENGINE').split('.')[-1] - settings.DATABASE_USER = dbinfo.get('USER') - settings.DATABASE_PASSWORD = dbinfo.get('PASSWORD') - settings.DATABASE_NAME = dbinfo.get('NAME') - settings.DATABASE_HOST = dbinfo.get('HOST') - settings.DATABASE_PORT = dbinfo.get('PORT') - return True - else: - # settings are set for django < 1.2 no modification needed - return True - def handle(self, *args, **options): """ Resets the database for this project. @@ -60,12 +39,21 @@ class Command(BaseCommand): Note: Transaction wrappers are in reverse as a work around for autocommit, anybody know how to do this the right way? 
""" + router = options.get('router') + dbinfo = settings.DATABASES.get(router) + if dbinfo is None: + raise CommandError("Unknown database router %s" % router) - if django.get_version() >= "1.2": - got_db_settings = self.set_db_settings(*args, **options) - if not got_db_settings: - raise CommandError("You are using Django %s which requires to specify the db-router.\nPlease specify the router by adding --router= to this command." % django.get_version()) - return + engine = dbinfo.get('ENGINE').split('.')[-1] + user = options.get('user') or dbinfo.get('USER') + password = options.get('password') or dbinfo.get('PASSWORD') + + database_name = options.get('dbname') or dbinfo.get('NAME') + if database_name == '': + raise CommandError("You need to specify DATABASE_NAME in your Django settings file.") + + database_host = dbinfo.get('HOST') + database_port = dbinfo.get('PORT') verbosity = int(options.get('verbosity', 1)) if options.get('interactive'): @@ -75,7 +63,7 @@ This will IRREVERSIBLY DESTROY ALL data in the database "%s". Are you sure you want to do this? -Type 'yes' to continue, or 'no' to cancel: """ % (settings.DATABASE_NAME,)) +Type 'yes' to continue, or 'no' to cancel: """ % (database_name,)) else: confirm = 'yes' @@ -83,71 +71,57 @@ Type 'yes' to continue, or 'no' to cancel: """ % (settings.DATABASE_NAME,)) print("Reset cancelled.") return - postgis = re.compile('.*postgis') - engine = settings.DATABASE_ENGINE - user = options.get('user', settings.DATABASE_USER) - if user is None: - user = settings.DATABASE_USER - password = options.get('password', settings.DATABASE_PASSWORD) - if password is None: - password = settings.DATABASE_PASSWORD - if engine in ('sqlite3', 'spatialite'): import os try: logging.info("Unlinking %s database" % engine) - os.unlink(settings.DATABASE_NAME) + os.unlink(database_name) except OSError: pass - elif engine == 'mysql': + + elif engine in ('mysql',): import MySQLdb as Database kwargs = { 'user': user, 'passwd': password, } - if settings.DATABASE_HOST.startswith('/'): - kwargs['unix_socket'] = settings.DATABASE_HOST + if database_host.startswith('/'): + kwargs['unix_socket'] = database_host else: - kwargs['host'] = settings.DATABASE_HOST - if settings.DATABASE_PORT: - kwargs['port'] = int(settings.DATABASE_PORT) + kwargs['host'] = database_host + + if database_port: + kwargs['port'] = int(database_port) connection = Database.connect(**kwargs) - drop_query = 'DROP DATABASE IF EXISTS `%s`' % settings.DATABASE_NAME + drop_query = 'DROP DATABASE IF EXISTS `%s`' % database_name utf8_support = options.get('no_utf8_support', False) and '' or 'CHARACTER SET utf8' - create_query = 'CREATE DATABASE `%s` %s' % (settings.DATABASE_NAME, utf8_support) + create_query = 'CREATE DATABASE `%s` %s' % (database_name, utf8_support) logging.info('Executing... "' + drop_query + '"') connection.query(drop_query) logging.info('Executing... 
"' + create_query + '"') connection.query(create_query) - elif engine == 'postgresql' or engine == 'postgresql_psycopg2' or postgis.match(engine): + elif engine in ('postgresql', 'postgresql_psycopg2', 'postgis'): if engine == 'postgresql': import psycopg as Database # NOQA - elif engine == 'postgresql_psycopg2' or postgis.match(engine): + elif engine in ('postgresql_psycopg2', 'postgis'): import psycopg2 as Database # NOQA - if settings.DATABASE_NAME == '': - from django.core.exceptions import ImproperlyConfigured - raise ImproperlyConfigured("You need to specify DATABASE_NAME in your Django settings file.") - - database_name = options.get('dbname', 'template1') - if options.get('dbname') is None: - database_name = 'template1' - conn_string = "dbname=%s" % database_name - if settings.DATABASE_USER: + conn_string = "dbname=template1" + if user: conn_string += " user=%s" % user - if settings.DATABASE_PASSWORD: + if password: conn_string += " password='%s'" % password - if settings.DATABASE_HOST: - conn_string += " host=%s" % settings.DATABASE_HOST - if settings.DATABASE_PORT: - conn_string += " port=%s" % settings.DATABASE_PORT + if database_host: + conn_string += " host=%s" % database_host + if database_port: + conn_string += " port=%s" % database_port connection = Database.connect(conn_string) connection.set_isolation_level(0) # autocommit false cursor = connection.cursor() - drop_query = 'DROP DATABASE %s' % settings.DATABASE_NAME + drop_query = 'DROP DATABASE %s;' % database_name logging.info('Executing... "' + drop_query + '"') try: @@ -155,18 +129,18 @@ Type 'yes' to continue, or 'no' to cancel: """ % (settings.DATABASE_NAME,)) except Database.ProgrammingError as e: logging.info("Error: %s" % str(e)) - # Encoding should be SQL_ASCII (7-bit postgres default) or prefered UTF8 (8-bit) - create_query = "CREATE DATABASE %s" % settings.DATABASE_NAME - if settings.DATABASE_USER: - create_query += " WITH OWNER = %s " % settings.DATABASE_USER + create_query = "CREATE DATABASE %s" % database_name + create_query += " WITH OWNER = %s " % user create_query += " ENCODING = 'UTF8'" - if postgis.match(engine): + if engine == 'postgis': create_query += ' TEMPLATE = template_postgis' + if settings.DEFAULT_TABLESPACE: create_query += ' TABLESPACE = %s;' % settings.DEFAULT_TABLESPACE else: create_query += ';' + logging.info('Executing... "' + create_query + '"') cursor.execute(create_query) diff --git a/awx/lib/site-packages/django_extensions/management/commands/runjob.py b/awx/lib/site-packages/django_extensions/management/commands/runjob.py index 858f654972..b698054013 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/runjob.py +++ b/awx/lib/site-packages/django_extensions/management/commands/runjob.py @@ -50,11 +50,3 @@ class Command(LabelCommand): print("Run a single maintenance job. 
Please specify the name of the job.") return self.runjob(app_name, job_name, options) - -# Backwards compatibility for Django r9110 -if not [opt for opt in Command.option_list if opt.dest == 'verbosity']: - Command.option_list += ( - make_option('--verbosity', '-v', action="store", dest="verbosity", - default='1', type='choice', choices=['0', '1', '2'], - help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"), - ) diff --git a/awx/lib/site-packages/django_extensions/management/commands/runjobs.py b/awx/lib/site-packages/django_extensions/management/commands/runjobs.py index 063f8ec4a9..c8a5d621b1 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/runjobs.py +++ b/awx/lib/site-packages/django_extensions/management/commands/runjobs.py @@ -87,11 +87,3 @@ class Command(LabelCommand): return self.runjobs(when, options) self.runjobs_by_signals(when, options) - -# Backwards compatibility for Django r9110 -if not [opt for opt in Command.option_list if opt.dest == 'verbosity']: - Command.option_list += ( - make_option('--verbosity', '-v', action="store", dest="verbosity", - default='1', type='choice', choices=['0', '1', '2'], - help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"), - ) diff --git a/awx/lib/site-packages/django_extensions/management/commands/runprofileserver.py b/awx/lib/site-packages/django_extensions/management/commands/runprofileserver.py index 84206a3c98..cd82a1cece 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/runprofileserver.py +++ b/awx/lib/site-packages/django_extensions/management/commands/runprofileserver.py @@ -140,7 +140,9 @@ class Command(BaseCommand): def handle(self, addrport='', *args, **options): import django - from django.core.servers.basehttp import run, WSGIServerException + import socket + import errno + from django.core.servers.basehttp import run try: from django.core.servers.basehttp import get_internal_wsgi_application as WSGIHandler except ImportError: @@ -152,6 +154,11 @@ class Command(BaseCommand): except ImportError: HAS_ADMINMEDIAHANDLER = False + try: + from django.core.servers.basehttp import WSGIServerException as wsgi_server_exc_cls + except ImportError: # Django 1.6 + wsgi_server_exc_cls = socket.error + if args: raise CommandError('Usage is runserver %s' % self.args) if not addrport: @@ -176,7 +183,10 @@ class Command(BaseCommand): def inner_run(): import os import time - import hotshot + try: + import hotshot + except ImportError: + pass # python 3.x USE_CPROFILE = options.get('use_cprofile', False) USE_LSPROF = options.get('use_lsprof', False) if USE_LSPROF: @@ -266,15 +276,22 @@ class Command(BaseCommand): handler = StaticFilesHandler(handler) handler = make_profiler_handler(handler) run(addr, int(port), handler) - except WSGIServerException as e: + except wsgi_server_exc_cls as e: # Use helpful error messages instead of ugly tracebacks. 
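A minimal, self-contained illustration of the errno-keyed pattern the ERRORS mapping just below adopts; the bind target is illustrative (binding port 80 usually raises EACCES for unprivileged users):

import errno
import socket

# Symbolic errno names keep the lookup portable; the raw integers differ
# across platforms, which is what the hunk below works around.
FRIENDLY = {
    errno.EACCES: "You don't have permission to access that port.",
    errno.EADDRINUSE: "That port is already in use.",
    errno.EADDRNOTAVAIL: "That IP address can't be assigned-to.",
}

try:
    s = socket.socket()
    s.bind(('127.0.0.1', 80))  # illustrative: typically fails without root
except socket.error as e:
    print(FRIENDLY.get(e.errno, str(e)))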
ERRORS = { - 13: "You don't have permission to access that port.", - 98: "That port is already in use.", - 99: "That IP address can't be assigned-to.", + errno.EACCES: "You don't have permission to access that port.", + errno.EADDRINUSE: "That port is already in use.", + errno.EADDRNOTAVAIL: "That IP address can't be assigned-to.", } + if not isinstance(e, socket.error): # Django < 1.6 + ERRORS[13] = ERRORS.pop(errno.EACCES) + ERRORS[98] = ERRORS.pop(errno.EADDRINUSE) + ERRORS[99] = ERRORS.pop(errno.EADDRNOTAVAIL) try: - error_text = ERRORS[e.args[0].args[0]] + if not isinstance(e, socket.error): # Django < 1.6 + error_text = ERRORS[e.args[0].args[0]] + else: + error_text = ERRORS[e.errno] except (AttributeError, KeyError): error_text = str(e) sys.stderr.write(self.style.ERROR("Error: %s" % error_text) + '\n') diff --git a/awx/lib/site-packages/django_extensions/management/commands/runscript.py b/awx/lib/site-packages/django_extensions/management/commands/runscript.py index a34080fc47..8b063c3797 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/runscript.py +++ b/awx/lib/site-packages/django_extensions/management/commands/runscript.py @@ -1,4 +1,5 @@ from django.core.management.base import BaseCommand +from django.conf import settings from optparse import make_option import imp @@ -37,8 +38,6 @@ class Command(BaseCommand): args = "script [script ...]" def handle(self, *scripts, **options): - from django.db.models import get_apps - NOTICE = self.style.SQL_TABLE NOTICE2 = self.style.SQL_FIELD ERROR = self.style.ERROR @@ -91,10 +90,10 @@ class Command(BaseCommand): module_tuple = imp.find_module(package, path) path = imp.load_module(package, *module_tuple).__path__ imp.find_module(mod.split('.')[-1], path) + t = __import__(mod, [], [], [" "]) except (ImportError, AttributeError): return False - t = __import__(mod, [], [], [" "]) #if verbosity > 1: # print(NOTICE("Found script %s ..." % mod)) if hasattr(t, "run"): @@ -111,10 +110,9 @@ class Command(BaseCommand): """ find script module which contains 'run' attribute """ modules = [] # first look in apps - for app in get_apps(): - app_name = app.__name__.split(".")[:-1] # + ['fixtures'] + for app in settings.INSTALLED_APPS: for subdir in subdirs: - mod = my_import(".".join(app_name + [subdir, script])) + mod = my_import("%s.%s.%s" % (app, subdir, script)) if mod: modules.append(mod) @@ -147,12 +145,3 @@ class Command(BaseCommand): if verbosity > 1: print(NOTICE2("Running script '%s' ..." 
% mod.__name__)) run_script(mod, *script_args) - - -# Backwards compatibility for Django r9110 -if not [opt for opt in Command.option_list if opt.dest == 'verbosity']: - Command.option_list += ( - make_option('--verbosity', '-v', action="store", dest="verbosity", - default='1', type='choice', choices=['0', '1', '2'], - help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"), - ) diff --git a/awx/lib/site-packages/django_extensions/management/commands/runserver_plus.py b/awx/lib/site-packages/django_extensions/management/commands/runserver_plus.py index 678e8eb91b..b7cebcaa7f 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/runserver_plus.py +++ b/awx/lib/site-packages/django_extensions/management/commands/runserver_plus.py @@ -1,13 +1,17 @@ -from django.conf import settings -from django.core.management.base import BaseCommand, CommandError -from django_extensions.management.utils import setup_logger, RedirectHandler -from optparse import make_option import os import re import socket import sys import time +from optparse import make_option + +from django.conf import settings +from django.core.management.base import BaseCommand, CommandError +from django_extensions.management.utils import setup_logger, RedirectHandler +from django_extensions.management.technical_response import null_technical_500_response + + try: if 'django.contrib.staticfiles' in settings.INSTALLED_APPS: from django.contrib.staticfiles.handlers import StaticFilesHandler @@ -20,6 +24,7 @@ try: except ImportError: USE_STATICFILES = False + naiveip_re = re.compile(r"""^(?: (?P (?P\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address @@ -28,11 +33,10 @@ naiveip_re = re.compile(r"""^(?: ):)?(?P\d+)$""", re.X) DEFAULT_PORT = "8000" + import logging logger = logging.getLogger(__name__) -from django_extensions.management.technical_response import null_technical_500_response - class Command(BaseCommand): option_list = BaseCommand.option_list + ( @@ -112,8 +116,18 @@ class Command(BaseCommand): from django.core.servers.basehttp import get_internal_wsgi_application as WSGIHandler except ImportError: from django.core.handlers.wsgi import WSGIHandler # noqa + try: from werkzeug import run_simple, DebuggedApplication + + # Set colored output + if settings.DEBUG: + try: + set_werkzeug_log_color() + except: # We are dealing with some internals, anything could go wrong + print("Wrapping internal werkzeug logger for color highlighting has failed!") + pass + except ImportError: raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/ or install via pip. (pip install Werkzeug)") @@ -233,3 +247,47 @@ class Command(BaseCommand): ssl_context=ssl_context ) inner_run() + + +def set_werkzeug_log_color(): + """Try to set color to the werkzeug log. 
+ """ + from django.core.management.color import color_style + from werkzeug.serving import WSGIRequestHandler + from werkzeug._internal import _log + + _style = color_style() + _orig_log = WSGIRequestHandler.log + + def werk_log(self, type, message, *args): + try: + msg = '%s - - [%s] %s' % ( + self.address_string(), + self.log_date_time_string(), + message % args, + ) + http_code = str(args[1]) + except: + return _orig_log(type, message, *args) + + # Utilize terminal colors, if available + if http_code[0] == '2': + # Put 2XX first, since it should be the common case + msg = _style.HTTP_SUCCESS(msg) + elif http_code[0] == '1': + msg = _style.HTTP_INFO(msg) + elif http_code == '304': + msg = _style.HTTP_NOT_MODIFIED(msg) + elif http_code[0] == '3': + msg = _style.HTTP_REDIRECT(msg) + elif http_code == '404': + msg = _style.HTTP_NOT_FOUND(msg) + elif http_code[0] == '4': + msg = _style.HTTP_BAD_REQUEST(msg) + else: + # Any 5XX, or any other response + msg = _style.HTTP_SERVER_ERROR(msg) + + _log(type, msg) + + WSGIRequestHandler.log = werk_log diff --git a/awx/lib/site-packages/django_extensions/management/commands/sqldiff.py b/awx/lib/site-packages/django_extensions/management/commands/sqldiff.py index d89d218eef..12fe804038 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/sqldiff.py +++ b/awx/lib/site-packages/django_extensions/management/commands/sqldiff.py @@ -48,9 +48,17 @@ def flatten(l, ltypes=(list, tuple)): def all_local_fields(meta): - all_fields = meta.local_fields[:] - for parent in meta.parents: - all_fields.extend(all_local_fields(parent._meta)) + all_fields = [] + if meta.managed: + if meta.proxy: + for parent in meta.parents: + all_fields.extend(all_local_fields(parent._meta)) + else: + for f in meta.local_fields: + col_type = f.db_type(connection=connection) + if col_type is None: + continue + all_fields.append(f) return all_fields @@ -63,6 +71,8 @@ class SQLDiff(object): 'table-missing-in-db', 'field-missing-in-db', 'field-missing-in-model', + 'fkey-missing-in-db', + 'fkey-missing-in-model', 'index-missing-in-db', 'index-missing-in-model', 'unique-missing-in-db', @@ -77,6 +87,8 @@ class SQLDiff(object): 'table-missing-in-db': "table '%(0)s' missing in database", 'field-missing-in-db': "field '%(1)s' defined in model but missing in database", 'field-missing-in-model': "field '%(1)s' defined in database but missing in model", + 'fkey-missing-in-db': "field '%(1)s' FOREIGN KEY defined in model but missing in database", + 'fkey-missing-in-model': "field '%(1)s' FOREIGN KEY defined in database but missing in model", 'index-missing-in-db': "field '%(1)s' INDEX defined in model but missing in database", 'index-missing-in-model': "field '%(1)s' INDEX defined in database schema but missing in model", 'unique-missing-in-db': "field '%(1)s' UNIQUE defined in model but missing in database", @@ -86,12 +98,13 @@ class SQLDiff(object): 'notnull-differ': "field '%(1)s' null differ: db='%(3)s', model='%(2)s'", } - SQL_FIELD_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2])) + SQL_FIELD_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD COLUMN'), style.SQL_FIELD(qn(args[1])), ' '.join(style.SQL_COLTYPE(a) if i == 0 else style.SQL_KEYWORD(a) for i, a in enumerate(args[2:]))) 
SQL_FIELD_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP COLUMN'), style.SQL_FIELD(qn(args[1]))) - SQL_INDEX_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('CREATE INDEX'), style.SQL_TABLE(qn("%s_idx" % '_'.join(args[0:2]))), style.SQL_KEYWORD('ON'), style.SQL_TABLE(qn(args[0])), style.SQL_FIELD(qn(args[1]))) + SQL_FKEY_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s %s %s %s (%s)%s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD COLUMN'), style.SQL_FIELD(qn(args[1])), ' '.join(style.SQL_COLTYPE(a) if i == 0 else style.SQL_KEYWORD(a) for i, a in enumerate(args[4:])), style.SQL_KEYWORD('REFERENCES'), style.SQL_TABLE(qn(args[2])), style.SQL_FIELD(qn(args[3])), connection.ops.deferrable_sql()) + SQL_INDEX_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s%s);" % (style.SQL_KEYWORD('CREATE INDEX'), style.SQL_TABLE(qn("%s" % '_'.join(a for a in args[0:3] if a))), style.SQL_KEYWORD('ON'), style.SQL_TABLE(qn(args[0])), style.SQL_FIELD(qn(args[1])), style.SQL_KEYWORD(args[3])) # FIXME: need to lookup index name instead of just appending _idx to table + fieldname - SQL_INDEX_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s;" % (style.SQL_KEYWORD('DROP INDEX'), style.SQL_TABLE(qn("%s_idx" % '_'.join(args[0:2])))) - SQL_UNIQUE_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD'), style.SQL_KEYWORD('UNIQUE'), style.SQL_FIELD(qn(args[1]))) + SQL_INDEX_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s;" % (style.SQL_KEYWORD('DROP INDEX'), style.SQL_TABLE(qn("%s" % '_'.join(a for a in args[0:3] if a)))) + SQL_UNIQUE_MISSING_IN_DB = lambda self, style, qn, args: "%s %s\n\t%s %s (%s);" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('ADD COLUMN'), style.SQL_KEYWORD('UNIQUE'), style.SQL_FIELD(qn(args[1]))) # FIXME: need to lookup unique constraint name instead of appending _key to table + fieldname SQL_UNIQUE_MISSING_IN_MODEL = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD('DROP'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_TABLE(qn("%s_key" % ('_'.join(args[:2]))))) SQL_FIELD_TYPE_DIFFER = lambda self, style, qn, args: "%s %s\n\t%s %s %s;" % (style.SQL_KEYWORD('ALTER TABLE'), style.SQL_TABLE(qn(args[0])), style.SQL_KEYWORD("MODIFY"), style.SQL_FIELD(qn(args[1])), style.SQL_COLTYPE(args[2])) @@ -124,6 +137,8 @@ class SQLDiff(object): 'table-missing-in-db': self.SQL_TABLE_MISSING_IN_DB, 'field-missing-in-db': self.SQL_FIELD_MISSING_IN_DB, 'field-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL, + 'fkey-missing-in-db': self.SQL_FKEY_MISSING_IN_DB, + 'fkey-missing-in-model': self.SQL_FIELD_MISSING_IN_MODEL, 'index-missing-in-db': self.SQL_INDEX_MISSING_IN_DB, 'index-missing-in-model': self.SQL_INDEX_MISSING_IN_MODEL, 'unique-missing-in-db': self.SQL_UNIQUE_MISSING_IN_DB, @@ -254,7 +269,12 @@ class SQLDiff(object): if field.db_index: attname = field.db_column or field.attname if not attname in table_indexes: - self.add_difference('index-missing-in-db', table_name, attname) + self.add_difference('index-missing-in-db', table_name, attname, '', '') + db_type = field.db_type(connection=connection) + if db_type.startswith('varchar'): + 
self.add_difference('index-missing-in-db', table_name, attname, 'like', ' varchar_pattern_ops') + if db_type.startswith('text'): + self.add_difference('index-missing-in-db', table_name, attname, 'like', ' text_pattern_ops') def find_index_missing_in_model(self, meta, table_indexes, table_name): fields = dict([(field.name, field) for field in all_local_fields(meta)]) @@ -270,6 +290,9 @@ class SQLDiff(object): if att_opts['unique'] and att_name in flatten(meta.unique_together): continue self.add_difference('index-missing-in-model', table_name, att_name) + db_type = field.db_type(connection=connection) + if db_type.startswith('varchar') or db_type.startswith('text'): + self.add_difference('index-missing-in-model', table_name, att_name, 'like') def find_field_missing_in_model(self, fieldmap, table_description, table_name): for row in table_description: @@ -280,7 +303,16 @@ class SQLDiff(object): db_fields = [row[0] for row in table_description] for field_name, field in fieldmap.iteritems(): if field_name not in db_fields: - self.add_difference('field-missing-in-db', table_name, field_name, field.db_type(connection=connection)) + field_output = [] + if field.rel: + field_output.extend([field.rel.to._meta.db_table, field.rel.to._meta.get_field(field.rel.field_name).column]) + op = 'fkey-missing-in-db' + else: + op = 'field-missing-in-db' + field_output.append(field.db_type(connection=connection)) + if not field.null: + field_output.append('NOT NULL') + self.add_difference(op, table_name, field_name, *field_output) def find_field_type_differ(self, meta, table_description, table_name, func=None): db_fields = dict([(row[0], row) for row in table_description]) @@ -566,7 +598,7 @@ class PostgresqlSQLDiff(SQLDiff): if check_constraint: check_constraint = check_constraint.replace("((", "(") check_constraint = check_constraint.replace("))", ")") - check_constraint = '("'.join([')' in e and '" '.join(e.split(" ", 1)) or e for e in check_constraint.split("(")]) + check_constraint = '("'.join([')' in e and '" '.join(p.strip('"') for p in e.split(" ", 1)) or e for e in check_constraint.split("(")]) # TODO: might be more than one constraint in definition? db_type += ' ' + check_constraint null = self.null.get((tablespace, table_name, field.attname), 'fixme') @@ -646,7 +678,7 @@ because you haven't specified the DATABASE_ENGINE setting.
Edit your settings file and change DATABASE_ENGINE to something like 'postgresql' or 'mysql'.""") if options.get('all_applications', False): - app_models = models.get_models() + app_models = models.get_models(include_auto_created=True) else: if not app_labels: raise CommandError('Enter at least one appname.') @@ -657,7 +689,7 @@ Edit your settings file and change DATABASE_ENGINE to something like 'postgresql app_models = [] for app in app_list: - app_models.extend(models.get_models(app)) + app_models.extend(models.get_models(app, include_auto_created=True)) ## remove all models that are not managed by Django #app_models = [model for model in app_models if getattr(model._meta, 'managed', True)] diff --git a/awx/lib/site-packages/django_extensions/management/commands/sync_media_s3.py b/awx/lib/site-packages/django_extensions/management/commands/sync_media_s3.py index 2fc9411344..dfa93dea31 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/sync_media_s3.py +++ b/awx/lib/site-packages/django_extensions/management/commands/sync_media_s3.py @@ -1,52 +1,9 @@ -""" -Sync Media to S3 -================ - -Django command that scans all files in your settings.MEDIA_ROOT folder and -uploads them to S3 with the same directory structure. - -This command can optionally do the following but it is off by default: -* gzip compress any CSS and Javascript files it finds and adds the appropriate - 'Content-Encoding' header. -* set a far future 'Expires' header for optimal caching. - -Note: This script requires the Python boto library and valid Amazon Web -Services API keys. - -Required settings.py variables: -AWS_ACCESS_KEY_ID = '' -AWS_SECRET_ACCESS_KEY = '' -AWS_BUCKET_NAME = '' - -When you call this command with the `--renamegzip` param, it will add -the '.gz' extension to the file name. But Safari just doesn't recognize -'.gz' files and your site won't work on it! To fix this problem, you can -set any other extension (like .jgz) in the `SYNC_S3_RENAME_GZIP_EXT` -variable. - -Command options are: - -p PREFIX, --prefix=PREFIX - The prefix to prepend to the path on S3. - --gzip Enables gzipping CSS and Javascript files. - --expires Enables setting a far future expires header. - --force Skip the file mtime check to force upload of all - files. - --filter-list Override default directory and file exclusion - filters. (enter as comma seperated line) - --renamegzip Enables renaming of gzipped files by appending '.gz'. - to the original file name. This way your original - assets will not be replaced by the gzipped ones. - You can change the extension setting the - `SYNC_S3_RENAME_GZIP_EXT` var in your settings.py - file. - --invalidate Invalidates the objects in CloudFront after uploaading - stuff to s3. +import warnings +warnings.simplefilter('default') +warnings.warn("sync_media_s3 is deprecated and will be removed on march 2014; use sync_s3 instead.", + PendingDeprecationWarning) -TODO: - * Use fnmatch (or regex) to allow more complex FILTER_LIST rules. 
- -""" import datetime import email import mimetypes @@ -68,8 +25,9 @@ from django.core.management.base import BaseCommand, CommandError try: import boto import boto.exception + HAS_BOTO = True except ImportError: - raise ImportError("The boto Python library is not installed.") + HAS_BOTO = False class Command(BaseCommand): @@ -127,6 +85,8 @@ class Command(BaseCommand): can_import_settings = True def handle(self, *args, **options): + if not HAS_BOTO: + raise ImportError("The boto Python library is not installed.") # Check for AWS keys in settings if not hasattr(settings, 'AWS_ACCESS_KEY_ID') or not hasattr(settings, 'AWS_SECRET_ACCESS_KEY'): @@ -330,11 +290,3 @@ class Command(BaseCommand): self.uploaded_files.append(file_key) file_obj.close() - -# Backwards compatibility for Django r9110 -if not [opt for opt in Command.option_list if opt.dest == 'verbosity']: - Command.option_list += ( - make_option('-v', '--verbosity', - dest='verbosity', default=1, action='count', - help="Verbose mode. Multiple -v options increase the verbosity."), - ) diff --git a/awx/lib/site-packages/django_extensions/management/commands/sync_s3.py b/awx/lib/site-packages/django_extensions/management/commands/sync_s3.py new file mode 100644 index 0000000000..00fcae3e42 --- /dev/null +++ b/awx/lib/site-packages/django_extensions/management/commands/sync_s3.py @@ -0,0 +1,359 @@ +""" +Sync Media to S3 +================ + +Django command that scans all files in your settings.MEDIA_ROOT and +settings.STATIC_ROOT folders and uploads them to S3 with the same directory +structure. + +This command can optionally do the following but it is off by default: +* gzip compress any CSS and Javascript files it finds and adds the appropriate + 'Content-Encoding' header. +* set a far future 'Expires' header for optimal caching. + +Note: This script requires the Python boto library and valid Amazon Web +Services API keys. + +Required settings.py variables: +AWS_ACCESS_KEY_ID = '' +AWS_SECRET_ACCESS_KEY = '' +AWS_BUCKET_NAME = '' + +When you call this command with the `--renamegzip` param, it will add +the '.gz' extension to the file name. But Safari just doesn't recognize +'.gz' files and your site won't work on it! To fix this problem, you can +set any other extension (like .jgz) in the `SYNC_S3_RENAME_GZIP_EXT` +variable. + +Command options are: + -p PREFIX, --prefix=PREFIX + The prefix to prepend to the path on S3. + --gzip Enables gzipping CSS and Javascript files. + --expires Enables setting a far future expires header. + --force Skip the file mtime check to force upload of all + files. + --filter-list Override default directory and file exclusion + filters. (enter as comma seperated line) + --renamegzip Enables renaming of gzipped files by appending '.gz'. + to the original file name. This way your original + assets will not be replaced by the gzipped ones. + You can change the extension setting the + `SYNC_S3_RENAME_GZIP_EXT` var in your settings.py + file. + --invalidate Invalidates the objects in CloudFront after uploaading + stuff to s3. + --media-only Only MEDIA_ROOT files will be uploaded to S3. + --static-only Only STATIC_ROOT files will be uploaded to S3. + + +TODO: + * Use fnmatch (or regex) to allow more complex FILTER_LIST rules. 
+ +""" +import datetime +import email +import mimetypes +from optparse import make_option +import os +import time +import gzip +try: + from cStringIO import StringIO + assert StringIO +except ImportError: + from StringIO import StringIO + + +from django.conf import settings +from django.core.management.base import BaseCommand, CommandError + +# Make sure boto is available +try: + import boto + import boto.exception + HAS_BOTO = True +except ImportError: + HAS_BOTO = False + + +class Command(BaseCommand): + # Extra variables to avoid passing these around + AWS_ACCESS_KEY_ID = '' + AWS_SECRET_ACCESS_KEY = '' + AWS_BUCKET_NAME = '' + AWS_CLOUDFRONT_DISTRIBUTION = '' + SYNC_S3_RENAME_GZIP_EXT = '' + + DIRECTORIES = '' + FILTER_LIST = ['.DS_Store', '.svn', '.hg', '.git', 'Thumbs.db'] + GZIP_CONTENT_TYPES = ( + 'text/css', + 'application/javascript', + 'application/x-javascript', + 'text/javascript' + ) + + uploaded_files = [] + upload_count = 0 + skip_count = 0 + + option_list = BaseCommand.option_list + ( + make_option('-p', '--prefix', + dest='prefix', + default=getattr(settings, 'SYNC_MEDIA_S3_PREFIX', ''), + help="The prefix to prepend to the path on S3."), + make_option('-d', '--dir', + dest='dir', + help="Custom static root directory to use"), + make_option('--gzip', + action='store_true', dest='gzip', default=False, + help="Enables gzipping CSS and Javascript files."), + make_option('--renamegzip', + action='store_true', dest='renamegzip', default=False, + help="Enables renaming of gzipped assets to have '.gz' appended to the filename."), + make_option('--expires', + action='store_true', dest='expires', default=False, + help="Enables setting a far future expires header."), + make_option('--force', + action='store_true', dest='force', default=False, + help="Skip the file mtime check to force upload of all files."), + make_option('--filter-list', dest='filter_list', + action='store', default='', + help="Override default directory and file exclusion filters. (enter as comma seperated line)"), + make_option('--invalidate', dest='invalidate', default=False, + action='store_true', + help='Invalidates the associated objects in CloudFront'), + make_option('--media-only', dest='media_only', default='', + action='store_true', + help="Only MEDIA_ROOT files will be uploaded to S3"), + make_option('--static-only', dest='static_only', default='', + action='store_true', + help="Only STATIC_ROOT files will be uploaded to S3"), + ) + + help = 'Syncs the complete MEDIA_ROOT structure and files to S3 into the given bucket name.' + args = 'bucket_name' + + can_import_settings = True + + def handle(self, *args, **options): + if not HAS_BOTO: + raise ImportError("The boto Python library is not installed.") + + # Check for AWS keys in settings + if not hasattr(settings, 'AWS_ACCESS_KEY_ID') or not hasattr(settings, 'AWS_SECRET_ACCESS_KEY'): + raise CommandError('Missing AWS keys from settings file. Please supply both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.') + else: + self.AWS_ACCESS_KEY_ID = settings.AWS_ACCESS_KEY_ID + self.AWS_SECRET_ACCESS_KEY = settings.AWS_SECRET_ACCESS_KEY + + if not hasattr(settings, 'AWS_BUCKET_NAME'): + raise CommandError('Missing bucket name from settings file. 
Please add the AWS_BUCKET_NAME to your settings file.') + else: + if not settings.AWS_BUCKET_NAME: + raise CommandError('AWS_BUCKET_NAME cannot be empty.') + self.AWS_BUCKET_NAME = settings.AWS_BUCKET_NAME + + if not hasattr(settings, 'MEDIA_ROOT'): + raise CommandError('MEDIA_ROOT must be set in your settings.') + else: + if not settings.MEDIA_ROOT: + raise CommandError('MEDIA_ROOT must be set in your settings.') + + self.AWS_CLOUDFRONT_DISTRIBUTION = getattr(settings, 'AWS_CLOUDFRONT_DISTRIBUTION', '') + + self.SYNC_S3_RENAME_GZIP_EXT = \ + getattr(settings, 'SYNC_S3_RENAME_GZIP_EXT', '.gz') + + self.verbosity = int(options.get('verbosity')) + self.prefix = options.get('prefix') + self.do_gzip = options.get('gzip') + self.rename_gzip = options.get('renamegzip') + self.do_expires = options.get('expires') + self.do_force = options.get('force') + self.invalidate = options.get('invalidate') + self.DIRECTORIES = options.get('dir') + self.FILTER_LIST = getattr(settings, 'FILTER_LIST', self.FILTER_LIST) + filter_list = options.get('filter_list') + if filter_list: + # command line option overrides default filter_list and + # settings.filter_list + self.FILTER_LIST = filter_list.split(',') + + self.media_only = options.get('media_only') + self.static_only = options.get('static_only') + # Get directories + if self.media_only and self.static_only: + raise CommandError("Can't use --media-only and --static-only together. Better not use anything...") + elif self.media_only: + self.DIRECTORIES = [settings.MEDIA_ROOT] + elif self.static_only: + self.DIRECTORIES = [settings.STATIC_ROOT] + elif self.DIRECTORIES: + self.DIRECTORIES = [self.DIRECTORIES] + else: + self.DIRECTORIES = [settings.MEDIA_ROOT, settings.STATIC_ROOT] + + # Now call the syncing method to walk the MEDIA_ROOT directory and + # upload all files found. + self.sync_s3() + + # Sending the invalidation request to CloudFront if the user + # requested this action + if self.invalidate: + self.invalidate_objects_cf() + + print("") + print("%d files uploaded." % self.upload_count) + print("%d files skipped." % self.skip_count) + + def open_cf(self): + """ + Returns an open connection to CloudFront + """ + return boto.connect_cloudfront( + self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY) + + def invalidate_objects_cf(self): + """ + Split the invalidation request in groups of 1000 objects + """ + if not self.AWS_CLOUDFRONT_DISTRIBUTION: + raise CommandError( + 'An object invalidation was requested but the variable ' + 'AWS_CLOUDFRONT_DISTRIBUTION is not present in your settings.') + + # We can't send more than 1000 objects in the same invalidation + # request. 
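A quick self-contained check of the slicing idiom used just below to split the upload list into CloudFront-sized batches of at most 1000 paths:

# Illustrative data; mirrors the `chunks = [objs[i:i + chunk] ...]` line below.
objs = list(range(2500))
chunk = 1000
chunks = [objs[i:i + chunk] for i in range(0, len(objs), chunk)]
assert [len(c) for c in chunks] == [1000, 1000, 500]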
+ chunk = 1000 + + # Connecting to CloudFront + conn = self.open_cf() + + # Splitting the object list + objs = self.uploaded_files + chunks = [objs[i:i + chunk] for i in range(0, len(objs), chunk)] + + # Invalidation requests + for paths in chunks: + conn.create_invalidation_request( + self.AWS_CLOUDFRONT_DISTRIBUTION, paths) + + def sync_s3(self): + """ + Walks the media/static directories and syncs files to S3 + """ + bucket, key = self.open_s3() + for directory in self.DIRECTORIES: + os.path.walk(directory, self.upload_s3, (bucket, key, self.AWS_BUCKET_NAME, directory)) + + def compress_string(self, s): + """Gzip a given string.""" + zbuf = StringIO() + zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf) + zfile.write(s) + zfile.close() + return zbuf.getvalue() + + def open_s3(self): + """ + Opens connection to S3 returning bucket and key + """ + conn = boto.connect_s3(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY) + try: + bucket = conn.get_bucket(self.AWS_BUCKET_NAME) + except boto.exception.S3ResponseError: + bucket = conn.create_bucket(self.AWS_BUCKET_NAME) + return bucket, boto.s3.key.Key(bucket) + + def upload_s3(self, arg, dirname, names): + """ + This is the callback to os.path.walk and where much of the work happens + """ + bucket, key, bucket_name, root_dir = arg + + # Skip directories we don't want to sync + if os.path.basename(dirname) in self.FILTER_LIST: + # prevent walk from processing subfiles/subdirs below the ignored one + del names[:] + return + + # Later we assume the MEDIA_ROOT ends with a trailing slash + if not root_dir.endswith(os.path.sep): + root_dir = root_dir + os.path.sep + + for file in names: + headers = {} + + if file in self.FILTER_LIST: + continue # Skip files we don't want to sync + + filename = os.path.join(dirname, file) + if os.path.isdir(filename): + continue # Don't try to upload directories + + file_key = filename[len(root_dir):] + if self.prefix: + file_key = '%s/%s' % (self.prefix, file_key) + + # Check if file on S3 is older than local file, if so, upload + if not self.do_force: + s3_key = bucket.get_key(file_key) + if s3_key: + s3_datetime = datetime.datetime(*time.strptime( + s3_key.last_modified, '%a, %d %b %Y %H:%M:%S %Z')[0:6]) + local_datetime = datetime.datetime.utcfromtimestamp( + os.stat(filename).st_mtime) + if local_datetime < s3_datetime: + self.skip_count += 1 + if self.verbosity > 1: + print("File %s hasn't been modified since last being uploaded" % file_key) + continue + + # File is newer, let's process and upload + if self.verbosity > 0: + print("Uploading %s..." % file_key) + + content_type = mimetypes.guess_type(filename)[0] + if content_type: + headers['Content-Type'] = content_type + file_obj = open(filename, 'rb') + file_size = os.fstat(file_obj.fileno()).st_size + filedata = file_obj.read() + if self.do_gzip: + # Gzipping only if file is large enough (>1K is recommended) + # and only if file is a common text type (not a binary file) + if file_size > 1024 and content_type in self.GZIP_CONTENT_TYPES: + filedata = self.compress_string(filedata) + if self.rename_gzip: + # If rename_gzip is True, then rename the file + # by appending an extension (like '.gz)' to + # original filename. 
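A self-contained round-trip check of the compress_string() helper defined above; the helper is re-declared here only so the snippet runs on its own, and plain StringIO stands in for the cStringIO/StringIO fallback the command uses:

import gzip
from StringIO import StringIO

def compress_string(s):
    """Gzip a given string (same logic as the command's helper)."""
    zbuf = StringIO()
    zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    zfile.write(s)
    zfile.close()
    return zbuf.getvalue()

data = 'x' * 4096
blob = compress_string(data)
assert len(blob) < len(data)                                  # worth gzipping
assert gzip.GzipFile(fileobj=StringIO(blob)).read() == data   # round-trips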
+ file_key = '%s.%s' % ( + file_key, self.SYNC_S3_RENAME_GZIP_EXT) + headers['Content-Encoding'] = 'gzip' + if self.verbosity > 1: + print("\tgzipped: %dk to %dk" % (file_size / 1024, len(filedata) / 1024)) + if self.do_expires: + # HTTP/1.0 + headers['Expires'] = '%s GMT' % (email.Utils.formatdate(time.mktime((datetime.datetime.now() + datetime.timedelta(days=365 * 2)).timetuple()))) + # HTTP/1.1 + headers['Cache-Control'] = 'max-age %d' % (3600 * 24 * 365 * 2) + if self.verbosity > 1: + print("\texpires: %s" % headers['Expires']) + print("\tcache-control: %s" % headers['Cache-Control']) + + try: + key.name = file_key + key.set_contents_from_string(filedata, headers, replace=True) + key.set_acl('public-read') + except boto.exception.S3CreateError as e: + print("Failed: %s" % e) + except Exception as e: + print(e) + raise + else: + self.upload_count += 1 + self.uploaded_files.append(file_key) + + file_obj.close() diff --git a/awx/lib/site-packages/django_extensions/management/commands/syncdata.py b/awx/lib/site-packages/django_extensions/management/commands/syncdata.py index 0228ddd938..4ff2295f7f 100644 --- a/awx/lib/site-packages/django_extensions/management/commands/syncdata.py +++ b/awx/lib/site-packages/django_extensions/management/commands/syncdata.py @@ -13,7 +13,6 @@ import sys import six from django.core.management.base import BaseCommand from django.core.management.color import no_style -from optparse import make_option class Command(BaseCommand): @@ -200,11 +199,3 @@ class Command(BaseCommand): # create tables, load data, and query, the query can return # incorrect results. See Django #7572, MySQL #37735. connection.close() - -# Backwards compatibility for Django r9110 -if not [opt for opt in Command.option_list if opt.dest == 'verbosity']: - Command.option_list += ( - make_option('--verbosity', '-v', action="store", dest="verbosity", - default='1', type='choice', choices=['0', '1', '2'], - help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"), - ) diff --git a/awx/lib/site-packages/django_extensions/management/jobs.py b/awx/lib/site-packages/django_extensions/management/jobs.py index 8bf01c2332..9269b4e200 100644 --- a/awx/lib/site-packages/django_extensions/management/jobs.py +++ b/awx/lib/site-packages/django_extensions/management/jobs.py @@ -53,7 +53,11 @@ class YearlyJob(BaseJob): def my_import(name): - imp = __import__(name) + try: + imp = __import__(name) + except ImportError as err: + raise JobError("Failed to import %s with error %s" % (name, err)) + mods = name.split('.') if len(mods) > 1: for mod in mods[1:]: diff --git a/awx/lib/site-packages/django_extensions/management/modelviz.py b/awx/lib/site-packages/django_extensions/management/modelviz.py index 1d18d16972..ca2422c49b 100644 --- a/awx/lib/site-packages/django_extensions/management/modelviz.py +++ b/awx/lib/site-packages/django_extensions/management/modelviz.py @@ -1,34 +1,34 @@ -#!/usr/bin/env python """ -Django model to DOT (Graphviz) converter -by Antonio Cavedoni +modelviz.py - DOT file generator for Django Models -Adapted to be used with django-extensions +Based on: + Django model to DOT (Graphviz) converter + by Antonio Cavedoni + Adapted to be used with django-extensions """ -__version__ = "0.9" +__version__ = "1.0" __license__ = "Python" -__author__ = "Antonio Cavedoni " +__author__ = "Bas van Oostveen ", __contributors__ = [ + "Antonio Cavedoni " "Stefano J. 
Attardi ", "limodou ", "Carlo C8E Miron", "Andre Campos ", "Justin Findlay ", "Alexander Houben ", - "Bas van Oostveen ", "Joern Hees ", ] import os - +import datetime from django.utils.translation import activate as activate_language from django.utils.safestring import mark_safe from django.template import Context, loader from django.db import models from django.db.models import get_models -from django.db.models.fields.related import \ - ForeignKey, OneToOneField, ManyToManyField, RelatedField +from django.db.models.fields.related import ForeignKey, OneToOneField, ManyToManyField, RelatedField try: from django.db.models.fields.generic import GenericRelation @@ -46,12 +46,15 @@ def parse_file_or_list(arg): def generate_dot(app_labels, **kwargs): + cli_options = kwargs.get('cli_options', None) disable_fields = kwargs.get('disable_fields', False) include_models = parse_file_or_list(kwargs.get('include_models', "")) all_applications = kwargs.get('all_applications', False) use_subgraph = kwargs.get('group_models', False) verbose_names = kwargs.get('verbose_names', False) - inheritance = kwargs.get('inheritance', False) + inheritance = kwargs.get('inheritance', True) + relations_as_fields = kwargs.get("relations_as_fields", True) + sort_fields = kwargs.get("sort_fields", True) language = kwargs.get('language', None) if language is not None: activate_language(language) @@ -67,10 +70,6 @@ def generate_dot(app_labels, **kwargs): return True return False - t = loader.get_template('django_extensions/graph_models/head.html') - c = Context({}) - dot = t.render(c) - apps = [] if all_applications: apps = models.get_apps() @@ -86,8 +85,6 @@ def generate_dot(app_labels, **kwargs): 'name': '"%s"' % app.__name__, 'app_name': "%s" % '.'.join(app.__name__.split('.')[:-1]), 'cluster_app_name': "cluster_%s" % app.__name__.replace(".", "_"), - 'disable_fields': disable_fields, - 'use_subgraph': use_subgraph, 'models': [] }) @@ -151,20 +148,29 @@ def generate_dot(app_labels, **kwargs): 'type': t, 'blank': field.blank, 'abstract': field in abstract_fields, + 'relation': isinstance(field, RelatedField), + 'primary_key': field.primary_key, }) - # Find all the real attributes. Relations are depicted as graph edges instead of attributes - attributes = [field for field in appmodel._meta.local_fields if not isinstance(field, RelatedField)] + attributes = [field for field in appmodel._meta.local_fields] + if not relations_as_fields: + # Find all the 'real' attributes. Relations are depicted as graph edges instead of attributes + attributes = [field for field in attributes if not isinstance(field, RelatedField)] # find primary key and print it first, ignoring implicit id if other pk exists pk = appmodel._meta.pk if not appmodel._meta.abstract and pk in attributes: add_attributes(pk) + for field in attributes: if skip_field(field): continue - if not field.primary_key: - add_attributes(field) + if field == pk: + continue + add_attributes(field) + + if sort_fields: + model['fields'] = sorted(model['fields'], key=lambda field: (not field['primary_key'], not field['relation'], field['label'])) # FIXME: actually many_to_many fields aren't saved in this model's db table, so why should we add an attribute-line for them in the resulting graph? 
#if appmodel._meta.many_to_many: @@ -241,34 +247,35 @@ def generate_dot(app_labels, **kwargs): 'name': "inheritance", 'label': l, 'arrows': '[arrowhead=empty, arrowtail=none, dir=both]', - 'needs_node': True + 'needs_node': True, } # TODO: seems as if abstract models aren't part of models.getModels, which is why they are printed by this without any attributes. if _rel not in model['relations'] and consider(_rel['target']): model['relations'].append(_rel) graph['models'].append(model) - graphs.append(graph) + if graph['models']: + graphs.append(graph) nodes = [] for graph in graphs: nodes.extend([e['name'] for e in graph['models']]) for graph in graphs: - # don't draw duplication nodes because of relations for model in graph['models']: for relation in model['relations']: if relation['target'] in nodes: relation['needs_node'] = False - # render templates - t = loader.get_template('django_extensions/graph_models/body.html') - dot += '\n' + t.render(graph) - for graph in graphs: - t = loader.get_template('django_extensions/graph_models/rel.html') - dot += '\n' + t.render(graph) + now = datetime.datetime.now() + t = loader.get_template('django_extensions/graph_models/digraph.dot') + c = Context({ + 'created_at': now.strftime("%Y-%m-%d %H:%M"), + 'cli_options': cli_options, + 'disable_fields': disable_fields, + 'use_subgraph': use_subgraph, + 'graphs': graphs, + }) + dot = t.render(c) - t = loader.get_template('django_extensions/graph_models/tail.html') - c = Context({}) - dot += '\n' + t.render(c) return dot diff --git a/awx/lib/site-packages/django_extensions/management/shells.py b/awx/lib/site-packages/django_extensions/management/shells.py index 8042f05e5f..e92d2cd1ea 100644 --- a/awx/lib/site-packages/django_extensions/management/shells.py +++ b/awx/lib/site-packages/django_extensions/management/shells.py @@ -4,6 +4,54 @@ class ObjectImportError(Exception): pass +def import_items(import_directives): + """ + Import the items in import_directives and return a list of the imported items + + Each item in import_directives should be one of the following forms + * a tuple like ('module.submodule', ('classname1', 'classname2')), which indicates a 'from module.submodule import classname1, classname2' + * a tuple like ('module.submodule', 'classname1'), which indicates a 'from module.submodule import classname1' + * a tuple like ('module.submodule', '*'), which indicates a 'from module.submodule import *' + * a simple 'module.submodule' which indicates 'import module.submodule'. 
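The directive forms listed above map one-for-one onto the SHELL_PLUS_PRE_IMPORTS and SHELL_PLUS_POST_IMPORTS settings consumed by import_objects() further below; a hypothetical configuration exercising each form:

# settings.py -- each entry is one of the documented directive forms
SHELL_PLUS_PRE_IMPORTS = (
    ('django.core.cache', ('cache',)),  # from django.core.cache import cache
    ('django.db.models', 'Q'),          # from django.db.models import Q
    ('datetime', '*'),                  # from datetime import *
    'json',                             # import json
)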
+ + Returns a dict mapping the names to the imported items + """ + imported_objects = {} + for directive in import_directives: + try: + # First try a straight import + if type(directive) is str: + imported_object = __import__(directive) + imported_objects[directive.split('.')[0]] = imported_object + print("import %s" % directive) + continue + try: + # Try the ('module.submodule', ('classname1', 'classname2')) form + for name in directive[1]: + imported_object = getattr(__import__(directive[0], {}, {}, name), name) + imported_objects[name] = imported_object + print("from %s import %s" % (directive[0], ', '.join(directive[1]))) + # If it is a tuple, but the second item isn't a list, so we have something like ('module.submodule', 'classname1') + except AttributeError: + # Check for the special '*' to import all + if directive[1] == '*': + imported_object = __import__(directive[0], {}, {}, directive[1]) + for k in dir(imported_object): + imported_objects[k] = getattr(imported_object, k) + print("from %s import *" % directive[0]) + else: + imported_object = getattr(__import__(directive[0], {}, {}, directive[1]), directive[1]) + imported_objects[directive[1]] = imported_object + print("from %s import %s" % (directive[0], directive[1])) + except ImportError: + try: + print("Unable to import %s" % directive) + except TypeError: + print("Unable to import %s from %s" % directive) + + return imported_objects + + def import_objects(options, style): # XXX: (Temporary) workaround for ticket #1796: force early loading of all # models from installed apps. (this is fixed by now, but leaving it here @@ -21,6 +69,11 @@ def import_objects(options, style): model_aliases = getattr(settings, 'SHELL_PLUS_MODEL_ALIASES', {}) + # Perform pre-imports before any other imports + imports = import_items(getattr(settings, 'SHELL_PLUS_PRE_IMPORTS', {})) + for k, v in imports.items(): + imported_objects[k] = v + for app_mod in get_apps(): app_models = get_models(app_mod) if not app_models: @@ -55,4 +108,9 @@ def import_objects(options, style): if not quiet_load: print(style.SQL_COLTYPE("From '%s' autoload: %s" % (app_mod.__name__.split('.')[-2], ", ".join(model_labels)))) + # Perform post-imports after any other imports + imports = import_items(getattr(settings, 'SHELL_PLUS_POST_IMPORTS', {})) + for k, v in imports.items(): + imported_objects[k] = v + return imported_objects diff --git a/awx/lib/site-packages/django_extensions/management/utils.py b/awx/lib/site-packages/django_extensions/management/utils.py index 3ffed0c5bb..45cd046bb9 100644 --- a/awx/lib/site-packages/django_extensions/management/utils.py +++ b/awx/lib/site-packages/django_extensions/management/utils.py @@ -3,11 +3,25 @@ import os import sys import logging +try: + from importlib import import_module +except ImportError: + try: + from django.utils.importlib import import_module + except ImportError: + def import_module(module): + return __import__(module, {}, {}, ['']) + def get_project_root(): """ get the project root directory """ - settings_mod = __import__(settings.SETTINGS_MODULE, {}, {}, ['']) - return os.path.dirname(os.path.abspath(settings_mod.__file__)) + django_settings_module = os.environ.get('DJANGO_SETTINGS_MODULE') + if not django_settings_module: + module_str = settings.SETTINGS_MODULE + else: + module_str = django_settings_module.split(".")[0] + mod = import_module(module_str) + return os.path.dirname(os.path.abspath(mod.__file__)) def _make_writeable(filename): diff --git 
a/awx/lib/site-packages/django_extensions/static/django_extensions/css/jquery.autocomplete.css b/awx/lib/site-packages/django_extensions/static/django_extensions/css/jquery.autocomplete.css index 27a58523ed..c9d97b0396 100644 --- a/awx/lib/site-packages/django_extensions/static/django_extensions/css/jquery.autocomplete.css +++ b/awx/lib/site-packages/django_extensions/static/django_extensions/css/jquery.autocomplete.css @@ -1,43 +1,38 @@ -/***************************************************************************** - * jQuery autocomplete - ****************************************************************************/ -.ac_results { - padding: 0px; - border: 1px solid #ccc; - background-color: #fff; - overflow: hidden; - z-index: 99999; - text-align: left; +/** + * @fileOverview CSS for jquery-autocomplete, the jQuery Autocompleter + * @author Dylan Verheul + * @license MIT | GPL | Apache 2.0, see LICENSE.txt + * @see https://github.com/dyve/jquery-autocomplete + */ +.acResults { + padding: 0px; + border: 1px solid WindowFrame; + background-color: Window; + overflow: hidden; } -.ac_results ul { - width: 100%; - list-style-position: outside; - list-style: none; - padding: 0; - margin: 0; +.acResults ul { + margin: 0px; + padding: 0px; + list-style-position: outside; + list-style: none; } -.ac_results li { - margin: 0px; - padding: 3px 5px; - cursor: default; - display: block; - font: menu; - font-size: 12px; - line-height: 14px; - overflow: hidden; +.acResults ul li { + margin: 0px; + padding: 2px 5px; + cursor: pointer; + display: block; + font: menu; + font-size: 12px; + overflow: hidden; } -.ac_loading { - background: white url('../img/indicator.gif') right center no-repeat; +.acLoading { + background : url('../img/indicator.gif') right center no-repeat; } -.ac_odd { - background-color: #eee; -} - -.ac_over { - background-color: #999; - color: white; +.acSelect { + background-color: Highlight; + color: HighlightText; } diff --git a/awx/lib/site-packages/django_extensions/static/django_extensions/js/jquery.autocomplete.js b/awx/lib/site-packages/django_extensions/static/django_extensions/js/jquery.autocomplete.js index 63f4734eee..7ba5d7402a 100644 --- a/awx/lib/site-packages/django_extensions/static/django_extensions/js/jquery.autocomplete.js +++ b/awx/lib/site-packages/django_extensions/static/django_extensions/js/jquery.autocomplete.js @@ -1,762 +1,1152 @@ -/* - * Autocomplete - jQuery plugin 1.0.2 - * - * Copyright (c) 2007 Dylan Verheul, Dan G. Switzer, Anjesh Tuladhar, Jörn Zaefferer - * - * Dual licensed under the MIT and GPL licenses: - * http://www.opensource.org/licenses/mit-license.php - * http://www.gnu.org/licenses/gpl.html - * - * Revision: $Id: jquery.autocomplete.js 5747 2008-06-25 18:30:55Z joern.zaefferer $ - * +/** + * @fileOverview jquery-autocomplete, the jQuery Autocompleter + * @author Dylan Verheul + * @version 2.4.4 + * @requires jQuery 1.6+ + * @license MIT | GPL | Apache 2.0, see LICENSE.txt + * @see https://github.com/dyve/jquery-autocomplete */ - -;(function($) { - -$.fn.extend({ - autocomplete: function(urlOrData, options) { - var isUrl = typeof urlOrData == "string"; - options = $.extend({}, $.Autocompleter.defaults, { - url: isUrl ? urlOrData : null, - data: isUrl ? null : urlOrData, - delay: isUrl ? $.Autocompleter.defaults.delay : 10, - max: options && !options.scroll ? 
10 : 150 - }, options); - - // if highlight is set to false, replace it with a do-nothing function - options.highlight = options.highlight || function(value) { return value; }; - - // if the formatMatch option is not specified, then use formatItem for backwards compatibility - options.formatMatch = options.formatMatch || options.formatItem; - - return this.each(function() { - new $.Autocompleter(this, options); - }); - }, - result: function(handler) { - return this.bind("result", handler); - }, - search: function(handler) { - return this.trigger("search", [handler]); - }, - flushCache: function() { - return this.trigger("flushCache"); - }, - setOptions: function(options){ - return this.trigger("setOptions", [options]); - }, - unautocomplete: function() { - return this.trigger("unautocomplete"); - } -}); - -$.Autocompleter = function(input, options) { - - var KEY = { - UP: 38, - DOWN: 40, - DEL: 46, - TAB: 9, - RETURN: 13, - ESC: 27, - COMMA: 188, - PAGEUP: 33, - PAGEDOWN: 34, - BACKSPACE: 8 - }; - - // Create $ object for input element - var $input = $(input).attr("autocomplete", "off").addClass(options.inputClass); - - var timeout; - var previousValue = ""; - var cache = $.Autocompleter.Cache(options); - var hasFocus = 0; - var lastKeyPressCode; - var config = { - mouseDownOnSelect: false - }; - var select = $.Autocompleter.Select(options, input, selectCurrent, config); - - var blockSubmit; - - // prevent form submit in opera when selecting with return key - $.browser.opera && $(input.form).bind("submit.autocomplete", function() { - if (blockSubmit) { - blockSubmit = false; - return false; - } - }); - - // only opera doesn't trigger keydown multiple times while pressed, others don't work with keypress at all - $input.bind(($.browser.opera ? "keypress" : "keydown") + ".autocomplete", function(event) { - // track last key pressed - lastKeyPressCode = event.keyCode; - switch(event.keyCode) { - - case KEY.UP: - event.preventDefault(); - if ( select.visible() ) { - select.prev(); - } else { - onChange(0, true); - } - break; - - case KEY.DOWN: - event.preventDefault(); - if ( select.visible() ) { - select.next(); - } else { - onChange(0, true); - } - break; - - case KEY.PAGEUP: - event.preventDefault(); - if ( select.visible() ) { - select.pageUp(); - } else { - onChange(0, true); - } - break; - - case KEY.PAGEDOWN: - event.preventDefault(); - if ( select.visible() ) { - select.pageDown(); - } else { - onChange(0, true); - } - break; - - // matches also semicolon - case options.multiple && $.trim(options.multipleSeparator) == "," && KEY.COMMA: - case KEY.TAB: - case KEY.RETURN: - if( selectCurrent() ) { - // stop default to prevent a form submit, Opera needs special handling - event.preventDefault(); - blockSubmit = true; - return false; - } - break; - - case KEY.ESC: - select.hide(); - break; - - default: - clearTimeout(timeout); - timeout = setTimeout(onChange, options.delay); - break; - } - }).focus(function(){ - // track whether the field has focus, we shouldn't process any - // results if the field no longer has focus - hasFocus++; - }).blur(function() { - hasFocus = 0; - if (!config.mouseDownOnSelect) { - hideResults(); - } - }).click(function() { - // show select when clicking in a focused field - if ( hasFocus++ > 1 && !select.visible() ) { - onChange(0, true); - } - }).bind("search", function() { - // TODO why not just specifying both arguments? - var fn = (arguments.length > 1) ? 
arguments[1] : null; - function findValueCallback(q, data) { - var result; - if( data && data.length ) { - for (var i=0; i < data.length; i++) { - if( data[i].result.toLowerCase() == q.toLowerCase() ) { - result = data[i]; - break; - } - } - } - if( typeof fn == "function" ) fn(result); - else $input.trigger("result", result && [result.data, result.value]); - } - $.each(trimWords($input.val()), function(i, value) { - request(value, findValueCallback, findValueCallback); - }); - }).bind("flushCache", function() { - cache.flush(); - }).bind("setOptions", function() { - $.extend(options, arguments[1]); - // if we've updated the data, repopulate - if ( "data" in arguments[1] ) - cache.populate(); - }).bind("unautocomplete", function() { - select.unbind(); - $input.unbind(); - $(input.form).unbind(".autocomplete"); - }); - - - function selectCurrent() { - var selected = select.selected(); - if( !selected ) - return false; - - var v = selected.result; - previousValue = v; - - if ( options.multiple ) { - var words = trimWords($input.val()); - if ( words.length > 1 ) { - v = words.slice(0, words.length - 1).join( options.multipleSeparator ) + options.multipleSeparator + v; - } - v += options.multipleSeparator; - } - - $input.val(v); - hideResultsNow(); - $input.trigger("result", [selected.data, selected.value]); - return true; - } - - function onChange(crap, skipPrevCheck) { - if( lastKeyPressCode == KEY.DEL ) { - select.hide(); - return; - } - - var currentValue = $input.val(); - - if ( !skipPrevCheck && currentValue == previousValue ) - return; - - previousValue = currentValue; - - currentValue = lastWord(currentValue); - if ( currentValue.length >= options.minChars) { - $input.addClass(options.loadingClass); - if (!options.matchCase) - currentValue = currentValue.toLowerCase(); - request(currentValue, receiveData, hideResultsNow); - } else { - stopLoading(); - select.hide(); - } - }; - - function trimWords(value) { - if ( !value ) { - return [""]; - } - var words = value.split( options.multipleSeparator ); - var result = []; - $.each(words, function(i, value) { - if ( $.trim(value) ) - result[i] = $.trim(value); - }); - return result; - } - - function lastWord(value) { - if ( !options.multiple ) - return value; - var words = trimWords(value); - return words[words.length - 1]; - } - - // fills in the input box w/the first match (assumed to be the best match) - // q: the term entered - // sValue: the first matching result - function autoFill(q, sValue){ - // autofill in the complete box w/the first match as long as the user hasn't entered in more data - // if the last user key pressed was backspace, don't autofill - if( options.autoFill && (lastWord($input.val()).toLowerCase() == q.toLowerCase()) && lastKeyPressCode != KEY.BACKSPACE ) { - // fill in the value (keep the case the user has typed) - $input.val($input.val() + sValue.substring(lastWord(previousValue).length)); - // select the portion of the value not typed by the user (so the next character will erase) - $.Autocompleter.Selection(input, previousValue.length, previousValue.length + sValue.length); - } - }; - - function hideResults() { - clearTimeout(timeout); - timeout = setTimeout(hideResultsNow, 200); - }; - - function hideResultsNow() { - var wasVisible = select.visible(); - select.hide(); - clearTimeout(timeout); - stopLoading(); - if (options.mustMatch) { - // call search and run callback - $input.search( - function (result){ - // if no value found, clear the input box - if( !result ) { - if (options.multiple) { - var words = 
trimWords($input.val()).slice(0, -1); - $input.val( words.join(options.multipleSeparator) + (words.length ? options.multipleSeparator : "") ); - } - else - $input.val( "" ); - } - } - ); - } - if (wasVisible) - // position cursor at end of input field - $.Autocompleter.Selection(input, input.value.length, input.value.length); - }; - - function receiveData(q, data) { - if ( data && data.length && hasFocus ) { - stopLoading(); - select.display(data, q); - autoFill(q, data[0].value); - select.show(); - } else { - hideResultsNow(); - } - }; - - function request(term, success, failure) { - if (!options.matchCase) - term = term.toLowerCase(); - var data = cache.load(term); - // recieve the cached data - if (data && data.length) { - success(term, data); - // if an AJAX url has been supplied, try loading the data now - } else if( (typeof options.url == "string") && (options.url.length > 0) ){ - - var extraParams = { - timestamp: +new Date() - }; - $.each(options.extraParams, function(key, param) { - extraParams[key] = typeof param == "function" ? param() : param; - }); - - $.ajax({ - // try to leverage ajaxQueue plugin to abort previous requests - mode: "abort", - // limit abortion to this input - port: "autocomplete" + input.name, - dataType: options.dataType, - url: options.url, - data: $.extend({ - q: lastWord(term), - limit: options.max - }, extraParams), - success: function(data) { - var parsed = options.parse && options.parse(data) || parse(data); - cache.add(term, parsed); - success(term, parsed); - } - }); - } else { - // if we have a failure, we need to empty the list -- this prevents the the [TAB] key from selecting the last successful match - select.emptyList(); - failure(term); - } - }; - - function parse(data) { - var parsed = []; - var rows = data.split("\n"); - for (var i=0; i < rows.length; i++) { - var row = $.trim(rows[i]); - if (row) { - row = row.split("|"); - parsed[parsed.length] = { - data: row, - value: row[0], - result: options.formatResult && options.formatResult(row, row[0]) || row[0] - }; - } - } - return parsed; - }; - - function stopLoading() { - $input.removeClass(options.loadingClass); - }; - -}; - -$.Autocompleter.defaults = { - inputClass: "ac_input", - resultsClass: "ac_results", - loadingClass: "ac_loading", - minChars: 1, - delay: 400, - matchCase: false, - matchSubset: true, - matchContains: false, - cacheLength: 10, - max: 100, - mustMatch: false, - extraParams: {}, - selectFirst: true, - formatItem: function(row) { return row[0]; }, - formatMatch: null, - autoFill: false, - width: 0, - multiple: false, - multipleSeparator: ", ", - highlight: function(value, term) { - return value.replace(new RegExp("(?![^&;]+;)(?!<[^<>]*)(" + term.replace(/([\^\$\(\)\[\]\{\}\*\.\+\?\|\\])/gi, "\\$1") + ")(?![^<>]*>)(?![^&;]+;)", "gi"), "$1"); - }, - scroll: true, - scrollHeight: 180 -}; - -$.Autocompleter.Cache = function(options) { - - var data = {}; - var length = 0; - - function matchSubset(s, sub) { - if (!options.matchCase) - s = s.toLowerCase(); - var i = s.indexOf(sub); - if (i == -1) return false; - return i == 0 || options.matchContains; - }; - - function add(q, value) { - if (length > options.cacheLength){ - flush(); - } - if (!data[q]){ - length++; - } - data[q] = value; - } - - function populate(){ - if( !options.data ) return false; - // track the matches - var stMatchSets = {}, - nullData = 0; - - // no url was specified, we need to adjust the cache length to make sure it fits the local data store - if( !options.url ) options.cacheLength = 1; - - // track all 
options for minChars = 0 - stMatchSets[""] = []; - - // loop through the array and create a lookup structure - for ( var i = 0, ol = options.data.length; i < ol; i++ ) { - var rawValue = options.data[i]; - // if rawValue is a string, make an array otherwise just reference the array - rawValue = (typeof rawValue == "string") ? [rawValue] : rawValue; - - var value = options.formatMatch(rawValue, i+1, options.data.length); - if ( value === false ) - continue; - - var firstChar = value.charAt(0).toLowerCase(); - // if no lookup array for this character exists, look it up now - if( !stMatchSets[firstChar] ) - stMatchSets[firstChar] = []; - - // if the match is a string - var row = { - value: value, - data: rawValue, - result: options.formatResult && options.formatResult(rawValue) || value - }; - - // push the current match into the set list - stMatchSets[firstChar].push(row); - - // keep track of minChars zero items - if ( nullData++ < options.max ) { - stMatchSets[""].push(row); - } - }; - - // add the data items to the cache - $.each(stMatchSets, function(i, value) { - // increase the cache size - options.cacheLength++; - // add to the cache - add(i, value); - }); - } - - // populate any existing data - setTimeout(populate, 25); - - function flush(){ - data = {}; - length = 0; - } - - return { - flush: flush, - add: add, - populate: populate, - load: function(q) { - if (!options.cacheLength || !length) - return null; - /* - * if dealing w/local data and matchContains than we must make sure - * to loop through all the data collections looking for matches - */ - if( !options.url && options.matchContains ){ - // track all matches - var csub = []; - // loop through all the data grids for matches - for( var k in data ){ - // don't search through the stMatchSets[""] (minChars: 0) cache - // this prevents duplicates - if( k.length > 0 ){ - var c = data[k]; - $.each(c, function(i, x) { - // if we've got a match, add it to the array - if (matchSubset(x.value, q)) { - csub.push(x); - } - }); - } - } - return csub; - } else - // if the exact item exists, use it - if (data[q]){ - return data[q]; - } else - if (options.matchSubset) { - for (var i = q.length - 1; i >= options.minChars; i--) { - var c = data[q.substr(0, i)]; - if (c) { - var csub = []; - $.each(c, function(i, x) { - if (matchSubset(x.value, q)) { - csub[csub.length] = x; - } - }); - return csub; - } - } - } - return null; - } - }; -}; - -$.Autocompleter.Select = function (options, input, select, config) { - var CLASSES = { - ACTIVE: "ac_over" - }; - - var listItems, - active = -1, - data, - term = "", - needsInit = true, - element, - list; - - // Create results - function init() { - if (!needsInit) - return; - element = $("
<div/>") - .hide() - .addClass(options.resultsClass) - .css("position", "absolute") - .appendTo(document.body); - - list = $("<ul/>
    ").appendTo(element).mouseover( function(event) { - if(target(event).nodeName && target(event).nodeName.toUpperCase() == 'LI') { - active = $("li", list).removeClass(CLASSES.ACTIVE).index(target(event)); - $(target(event)).addClass(CLASSES.ACTIVE); - } - }).click(function(event) { - $(target(event)).addClass(CLASSES.ACTIVE); - select(); - // TODO provide option to avoid setting focus again after selection? useful for cleanup-on-focus - input.focus(); - return false; - }).mousedown(function() { - config.mouseDownOnSelect = true; - }).mouseup(function() { - config.mouseDownOnSelect = false; - }); - - if( options.width > 0 ) - element.css("width", options.width); - - needsInit = false; - } - - function target(event) { - var element = event.target; - while(element && element.tagName != "LI") - element = element.parentNode; - // more fun with IE, sometimes event.target is empty, just ignore it then - if(!element) - return []; - return element; - } - - function moveSelect(step) { - listItems.slice(active, active + 1).removeClass(CLASSES.ACTIVE); - movePosition(step); - var activeItem = listItems.slice(active, active + 1).addClass(CLASSES.ACTIVE); - if(options.scroll) { - var offset = 0; - listItems.slice(0, active).each(function() { - offset += this.offsetHeight; - }); - if((offset + activeItem[0].offsetHeight - list.scrollTop()) > list[0].clientHeight) { - list.scrollTop(offset + activeItem[0].offsetHeight - list.innerHeight()); - } else if(offset < list.scrollTop()) { - list.scrollTop(offset); +(function($) { + "use strict"; + + /** + * jQuery autocomplete plugin + * @param {object|string} options + * @returns (object} jQuery object + */ + $.fn.autocomplete = function(options) { + var url; + if (arguments.length > 1) { + url = options; + options = arguments[1]; + options.url = url; + } else if (typeof options === 'string') { + url = options; + options = { url: url }; + } + var opts = $.extend({}, $.fn.autocomplete.defaults, options); + return this.each(function() { + var $this = $(this); + $this.data('autocompleter', new $.Autocompleter( + $this, + $.meta ? 
$.extend({}, opts, $this.data()) : opts + )); + }); + }; + + /** + * Store default options + * @type {object} + */ + $.fn.autocomplete.defaults = { + inputClass: 'acInput', + loadingClass: 'acLoading', + resultsClass: 'acResults', + selectClass: 'acSelect', + queryParamName: 'q', + extraParams: {}, + remoteDataType: false, + lineSeparator: '\n', + cellSeparator: '|', + minChars: 2, + maxItemsToShow: 10, + delay: 400, + useCache: true, + maxCacheLength: 10, + matchSubset: true, + matchCase: false, + matchInside: true, + mustMatch: false, + selectFirst: false, + selectOnly: false, + showResult: null, + preventDefaultReturn: 1, + preventDefaultTab: 0, + autoFill: false, + filterResults: true, + filter: true, + sortResults: true, + sortFunction: null, + onItemSelect: null, + onNoMatch: null, + onFinish: null, + matchStringConverter: null, + beforeUseConverter: null, + autoWidth: 'min-width', + useDelimiter: false, + delimiterChar: ',', + delimiterKeyCode: 188, + processData: null, + onError: null, + enabled: true + }; + + /** + * Sanitize result + * @param {Object} result + * @returns {Object} object with members value (String) and data (Object) + * @private + */ + var sanitizeResult = function(result) { + var value, data; + var type = typeof result; + if (type === 'string') { + value = result; + data = {}; + } else if ($.isArray(result)) { + value = result[0]; + data = result.slice(1); + } else if (type === 'object') { + value = result.value; + data = result.data; + } + value = String(value); + if (typeof data !== 'object') { + data = {}; + } + return { + value: value, + data: data + }; + }; + + /** + * Sanitize integer + * @param {mixed} value + * @param {Object} options + * @returns {Number} integer + * @private + */ + var sanitizeInteger = function(value, stdValue, options) { + var num = parseInt(value, 10); + options = options || {}; + if (isNaN(num) || (options.min && num < options.min)) { + num = stdValue; + } + return num; + }; + + /** + * Create partial url for a name/value pair + */ + var makeUrlParam = function(name, value) { + return [name, encodeURIComponent(value)].join('='); + }; + + /** + * Build an url + * @param {string} url Base url + * @param {object} [params] Dictionary of parameters + */ + var makeUrl = function(url, params) { + var urlAppend = []; + $.each(params, function(index, value) { + urlAppend.push(makeUrlParam(index, value)); + }); + if (urlAppend.length) { + url += url.indexOf('?') === -1 ? '?' 
: '&'; + url += urlAppend.join('&'); + } + return url; + }; + + /** + * Default sort filter + * @param {object} a + * @param {object} b + * @param {boolean} matchCase + * @returns {number} + */ + var sortValueAlpha = function(a, b, matchCase) { + a = String(a.value); + b = String(b.value); + if (!matchCase) { + a = a.toLowerCase(); + b = b.toLowerCase(); + } + if (a > b) { + return 1; + } + if (a < b) { + return -1; + } + return 0; + }; + + /** + * Parse data received in text format + * @param {string} text Plain text input + * @param {string} lineSeparator String that separates lines + * @param {string} cellSeparator String that separates cells + * @returns {array} Array of autocomplete data objects + */ + var plainTextParser = function(text, lineSeparator, cellSeparator) { + var results = []; + var i, j, data, line, value, lines; + // Be nice, fix linebreaks before splitting on lineSeparator + lines = String(text).replace('\r\n', '\n').split(lineSeparator); + for (i = 0; i < lines.length; i++) { + line = lines[i].split(cellSeparator); + data = []; + for (j = 0; j < line.length; j++) { + data.push(decodeURIComponent(line[j])); + } + value = data.shift(); + results.push({ value: value, data: data }); + } + return results; + }; + + /** + * Autocompleter class + * @param {object} $elem jQuery object with one input tag + * @param {object} options Settings + * @constructor + */ + $.Autocompleter = function($elem, options) { + + /** + * Assert parameters + */ + if (!$elem || !($elem instanceof $) || $elem.length !== 1 || $elem.get(0).tagName.toUpperCase() !== 'INPUT') { + throw new Error('Invalid parameter for jquery.Autocompleter, jQuery object with one element with INPUT tag expected.'); + } + + /** + * @constant Link to this instance + * @type object + * @private + */ + var self = this; + + /** + * @property {object} Options for this instance + * @public + */ + this.options = options; + + /** + * @property object Cached data for this instance + * @private + */ + this.cacheData_ = {}; + + /** + * @property {number} Number of cached data items + * @private + */ + this.cacheLength_ = 0; + + /** + * @property {string} Class name to mark selected item + * @private + */ + this.selectClass_ = 'jquery-autocomplete-selected-item'; + + /** + * @property {number} Handler to activation timeout + * @private + */ + this.keyTimeout_ = null; + + /** + * @property {number} Handler to finish timeout + * @private + */ + this.finishTimeout_ = null; + + /** + * @property {number} Last key pressed in the input field (store for behavior) + * @private + */ + this.lastKeyPressed_ = null; + + /** + * @property {string} Last value processed by the autocompleter + * @private + */ + this.lastProcessedValue_ = null; + + /** + * @property {string} Last value selected by the user + * @private + */ + this.lastSelectedValue_ = null; + + /** + * @property {boolean} Is this autocompleter active (showing results)? + * @see showResults + * @private + */ + this.active_ = false; + + /** + * @property {boolean} Is this autocompleter allowed to finish on blur? 
+ * @private + */ + this.finishOnBlur_ = true; + + /** + * Sanitize options + */ + this.options.minChars = sanitizeInteger(this.options.minChars, $.fn.autocomplete.defaults.minChars, { min: 0 }); + this.options.maxItemsToShow = sanitizeInteger(this.options.maxItemsToShow, $.fn.autocomplete.defaults.maxItemsToShow, { min: 0 }); + this.options.maxCacheLength = sanitizeInteger(this.options.maxCacheLength, $.fn.autocomplete.defaults.maxCacheLength, { min: 1 }); + this.options.delay = sanitizeInteger(this.options.delay, $.fn.autocomplete.defaults.delay, { min: 0 }); + if (this.options.preventDefaultReturn != 2) { + this.options.preventDefaultReturn = this.options.preventDefaultReturn ? 1 : 0; + } + if (this.options.preventDefaultTab != 2) { + this.options.preventDefaultTab = this.options.preventDefaultTab ? 1 : 0; + } + + /** + * Init DOM elements repository + */ + this.dom = {}; + + /** + * Store the input element we're attached to in the repository + */ + this.dom.$elem = $elem; + + /** + * Switch off the native autocomplete and add the input class + */ + this.dom.$elem.attr('autocomplete', 'off').addClass(this.options.inputClass); + + /** + * Create DOM element to hold results, and force absolute position + */ + this.dom.$results = $('
<div></div>
    ').hide().addClass(this.options.resultsClass).css({ + position: 'absolute' + }); + $('body').append(this.dom.$results); + + /** + * Attach keyboard monitoring to $elem + */ + $elem.keydown(function(e) { + self.lastKeyPressed_ = e.keyCode; + switch(self.lastKeyPressed_) { + + case self.options.delimiterKeyCode: // comma = 188 + if (self.options.useDelimiter && self.active_) { + self.selectCurrent(); + } + break; + + // ignore navigational & special keys + case 35: // end + case 36: // home + case 16: // shift + case 17: // ctrl + case 18: // alt + case 37: // left + case 39: // right + break; + + case 38: // up + e.preventDefault(); + if (self.active_) { + self.focusPrev(); + } else { + self.activate(); + } + return false; + + case 40: // down + e.preventDefault(); + if (self.active_) { + self.focusNext(); + } else { + self.activate(); + } + return false; + + case 9: // tab + if (self.active_) { + self.selectCurrent(); + if (self.options.preventDefaultTab) { + e.preventDefault(); + return false; + } + } + if (self.options.preventDefaultTab === 2) { + e.preventDefault(); + return false; + } + break; + + case 13: // return + if (self.active_) { + self.selectCurrent(); + if (self.options.preventDefaultReturn) { + e.preventDefault(); + return false; + } + } + if (self.options.preventDefaultReturn === 2) { + e.preventDefault(); + return false; + } + break; + + case 27: // escape + if (self.active_) { + e.preventDefault(); + self.deactivate(true); + return false; + } + break; + + default: + self.activate(); + + } + }); + + /** + * Attach paste event listener because paste may occur much later then keydown or even without a keydown at all + */ + $elem.on('paste', function() { + self.activate(); + }); + + /** + * Finish on blur event + * Use a timeout because instant blur gives race conditions + */ + var onBlurFunction = function() { + self.deactivate(true); + } + $elem.blur(function() { + if (self.finishOnBlur_) { + self.finishTimeout_ = setTimeout(onBlurFunction, 200); + } + }); + /** + * Catch a race condition on form submit + */ + $elem.parents('form').on('submit', onBlurFunction); + + }; + + /** + * Position output DOM elements + * @private + */ + $.Autocompleter.prototype.position = function() { + var offset = this.dom.$elem.offset(); + var height = this.dom.$results.outerHeight(); + var totalHeight = $(window).outerHeight(); + var inputBottom = offset.top + this.dom.$elem.outerHeight(); + var bottomIfDown = inputBottom + height; + // Set autocomplete results at the bottom of input + var position = {top: inputBottom, left: offset.left}; + if (bottomIfDown > totalHeight) { + // Try to set autocomplete results at the top of input + var topIfUp = offset.top - height; + if (topIfUp >= 0) { + position.top = topIfUp; } } - }; - - function movePosition(step) { - active += step; - if (active < 0) { - active = listItems.size() - 1; - } else if (active >= listItems.size()) { - active = 0; - } - } - - function limitNumberOfItems(available) { - return options.max && options.max < available - ? options.max - : available; - } - - function fillList() { - list.empty(); - var max = limitNumberOfItems(data.length); - for (var i=0; i < max; i++) { - if (!data[i]) - continue; - var formatted = options.formatItem(data[i].data, i+1, max, data[i].value, term); - if ( formatted === false ) - continue; - var li = $("
<li/>
  • ").html( options.highlight(formatted, term) ).addClass(i%2 == 0 ? "ac_even" : "ac_odd").appendTo(list)[0]; - $.data(li, "ac_data", data[i]); - } - listItems = list.find("li"); - if ( options.selectFirst ) { - listItems.slice(0, 1).addClass(CLASSES.ACTIVE); - active = 0; - } - // apply bgiframe if available - if ( $.fn.bgiframe ) - list.bgiframe(); - } - - return { - display: function(d, q) { - init(); - data = d; - term = q; - fillList(); - }, - next: function() { - moveSelect(1); - }, - prev: function() { - moveSelect(-1); - }, - pageUp: function() { - if (active != 0 && active - 8 < 0) { - moveSelect( -active ); - } else { - moveSelect(-8); - } - }, - pageDown: function() { - if (active != listItems.size() - 1 && active + 8 > listItems.size()) { - moveSelect( listItems.size() - 1 - active ); - } else { - moveSelect(8); - } - }, - hide: function() { - element && element.hide(); - listItems && listItems.removeClass(CLASSES.ACTIVE); - active = -1; - }, - visible : function() { - return element && element.is(":visible"); - }, - current: function() { - return this.visible() && (listItems.filter("." + CLASSES.ACTIVE)[0] || options.selectFirst && listItems[0]); - }, - show: function() { - var offset = $(input).offset(); - element.css({ - width: typeof options.width == "string" || options.width > 0 ? options.width : $(input).width(), - top: offset.top + input.offsetHeight, - left: offset.left - }).show(); - if(options.scroll) { - list.scrollTop(0); - list.css({ - maxHeight: options.scrollHeight, - overflow: 'auto' - }); - - if($.browser.msie && typeof document.body.style.maxHeight === "undefined") { - var listHeight = 0; - listItems.each(function() { - listHeight += this.offsetHeight; - }); - var scrollbarsVisible = listHeight > options.scrollHeight; - list.css('height', scrollbarsVisible ? options.scrollHeight : listHeight ); - if (!scrollbarsVisible) { - // IE doesn't recalculate width when scrollbar disappears - listItems.width( list.width() - parseInt(listItems.css("padding-left")) - parseInt(listItems.css("padding-right")) ); - } - } + this.dom.$results.css(position); + }; + /** + * Read from cache + * @private + */ + $.Autocompleter.prototype.cacheRead = function(filter) { + var filterLength, searchLength, search, maxPos, pos; + if (this.options.useCache) { + filter = String(filter); + filterLength = filter.length; + if (this.options.matchSubset) { + searchLength = 1; + } else { + searchLength = filterLength; } - }, - selected: function() { - var selected = listItems && listItems.filter("." 
+ CLASSES.ACTIVE).removeClass(CLASSES.ACTIVE); - return selected && selected.length && $.data(selected[0], "ac_data"); - }, - emptyList: function (){ - list && list.empty(); - }, - unbind: function() { - element && element.remove(); - } - }; -}; + while (searchLength <= filterLength) { + if (this.options.matchInside) { + maxPos = filterLength - searchLength; + } else { + maxPos = 0; + } + pos = 0; + while (pos <= maxPos) { + search = filter.substr(0, searchLength); + if (this.cacheData_[search] !== undefined) { + return this.cacheData_[search]; + } + pos++; + } + searchLength++; + } + } + return false; + }; -$.Autocompleter.Selection = function(field, start, end) { - if( field.createTextRange ){ - var selRange = field.createTextRange(); - selRange.collapse(true); - selRange.moveStart("character", start); - selRange.moveEnd("character", end); - selRange.select(); - } else if( field.setSelectionRange ){ - field.setSelectionRange(start, end); - } else { - if( field.selectionStart ){ - field.selectionStart = start; - field.selectionEnd = end; - } - } - field.focus(); -}; + /** + * Write to cache + * @private + */ + $.Autocompleter.prototype.cacheWrite = function(filter, data) { + if (this.options.useCache) { + if (this.cacheLength_ >= this.options.maxCacheLength) { + this.cacheFlush(); + } + filter = String(filter); + if (this.cacheData_[filter] !== undefined) { + this.cacheLength_++; + } + this.cacheData_[filter] = data; + return this.cacheData_[filter]; + } + return false; + }; -})((typeof window.jQuery == 'undefined' && typeof window.django != 'undefined') - ? django.jQuery - : jQuery -); + /** + * Flush cache + * @public + */ + $.Autocompleter.prototype.cacheFlush = function() { + this.cacheData_ = {}; + this.cacheLength_ = 0; + }; + + /** + * Call hook + * Note that all called hooks are passed the autocompleter object + * @param {string} hook + * @param data + * @returns Result of called hook, false if hook is undefined + */ + $.Autocompleter.prototype.callHook = function(hook, data) { + var f = this.options[hook]; + if (f && $.isFunction(f)) { + return f(data, this); + } + return false; + }; + + /** + * Set timeout to activate autocompleter + */ + $.Autocompleter.prototype.activate = function() { + if (!this.options.enabled) return; + var self = this; + if (this.keyTimeout_) { + clearTimeout(this.keyTimeout_); + } + this.keyTimeout_ = setTimeout(function() { + self.activateNow(); + }, this.options.delay); + }; + + /** + * Activate autocompleter immediately + */ + $.Autocompleter.prototype.activateNow = function() { + var value = this.beforeUseConverter(this.dom.$elem.val()); + if (value !== this.lastProcessedValue_ && value !== this.lastSelectedValue_) { + this.fetchData(value); + } + }; + + /** + * Get autocomplete data for a given value + * @param {string} value Value to base autocompletion on + * @private + */ + $.Autocompleter.prototype.fetchData = function(value) { + var self = this; + var processResults = function(results, filter) { + if (self.options.processData) { + results = self.options.processData(results); + } + self.showResults(self.filterResults(results, filter), filter); + }; + this.lastProcessedValue_ = value; + if (value.length < this.options.minChars) { + processResults([], value); + } else if (this.options.data) { + processResults(this.options.data, value); + } else { + this.fetchRemoteData(value, function(remoteData) { + processResults(remoteData, value); + }); + } + }; + + /** + * Get remote autocomplete data for a given value + * @param {string} filter The filter to 
base remote data on + * @param {function} callback The function to call after data retrieval + * @private + */ + $.Autocompleter.prototype.fetchRemoteData = function(filter, callback) { + var data = this.cacheRead(filter); + if (data) { + callback(data); + } else { + var self = this; + var dataType = self.options.remoteDataType === 'json' ? 'json' : 'text'; + var ajaxCallback = function(data) { + var parsed = false; + if (data !== false) { + parsed = self.parseRemoteData(data); + self.cacheWrite(filter, parsed); + } + self.dom.$elem.removeClass(self.options.loadingClass); + callback(parsed); + }; + this.dom.$elem.addClass(this.options.loadingClass); + $.ajax({ + url: this.makeUrl(filter), + success: ajaxCallback, + error: function(jqXHR, textStatus, errorThrown) { + if($.isFunction(self.options.onError)) { + self.options.onError(jqXHR, textStatus, errorThrown); + } else { + ajaxCallback(false); + } + }, + dataType: dataType + }); + } + }; + + /** + * Create or update an extra parameter for the remote request + * @param {string} name Parameter name + * @param {string} value Parameter value + * @public + */ + $.Autocompleter.prototype.setExtraParam = function(name, value) { + var index = $.trim(String(name)); + if (index) { + if (!this.options.extraParams) { + this.options.extraParams = {}; + } + if (this.options.extraParams[index] !== value) { + this.options.extraParams[index] = value; + this.cacheFlush(); + } + } + + return this; + }; + + /** + * Build the url for a remote request + * If options.queryParamName === false, append query to url instead of using a GET parameter + * @param {string} param The value parameter to pass to the backend + * @returns {string} The finished url with parameters + */ + $.Autocompleter.prototype.makeUrl = function(param) { + var self = this; + var url = this.options.url; + var params = $.extend({}, this.options.extraParams); + + if (this.options.queryParamName === false) { + url += encodeURIComponent(param); + } else { + params[this.options.queryParamName] = param; + } + + return makeUrl(url, params); + }; + + /** + * Parse data received from server + * @param remoteData Data received from remote server + * @returns {array} Parsed data + */ + $.Autocompleter.prototype.parseRemoteData = function(remoteData) { + var remoteDataType; + var data = remoteData; + if (this.options.remoteDataType === 'json') { + remoteDataType = typeof(remoteData); + switch (remoteDataType) { + case 'object': + data = remoteData; + break; + case 'string': + data = $.parseJSON(remoteData); + break; + default: + throw new Error("Unexpected remote data type: " + remoteDataType); + } + return data; + } + return plainTextParser(data, this.options.lineSeparator, this.options.cellSeparator); + }; + + /** + * Default filter for results + * @param {Object} result + * @param {String} filter + * @returns {boolean} Include this result + * @private + */ + $.Autocompleter.prototype.defaultFilter = function(result, filter) { + if (!result.value) { + return false; + } + if (this.options.filterResults) { + var pattern = this.matchStringConverter(filter); + var testValue = this.matchStringConverter(result.value); + if (!this.options.matchCase) { + pattern = pattern.toLowerCase(); + testValue = testValue.toLowerCase(); + } + var patternIndex = testValue.indexOf(pattern); + if (this.options.matchInside) { + return patternIndex > -1; + } else { + return patternIndex === 0; + } + } + return true; + }; + + /** + * Filter result + * @param {Object} result + * @param {String} filter + * @returns {boolean} 
Include this result + * @private + */ + $.Autocompleter.prototype.filterResult = function(result, filter) { + // No filter + if (this.options.filter === false) { + return true; + } + // Custom filter + if ($.isFunction(this.options.filter)) { + return this.options.filter(result, filter); + } + // Default filter + return this.defaultFilter(result, filter); + }; + + /** + * Filter results + * @param results + * @param filter + */ + $.Autocompleter.prototype.filterResults = function(results, filter) { + var filtered = []; + var i, result; + + for (i = 0; i < results.length; i++) { + result = sanitizeResult(results[i]); + if (this.filterResult(result, filter)) { + filtered.push(result); + } + } + if (this.options.sortResults) { + filtered = this.sortResults(filtered, filter); + } + if (this.options.maxItemsToShow > 0 && this.options.maxItemsToShow < filtered.length) { + filtered.length = this.options.maxItemsToShow; + } + return filtered; + }; + + /** + * Sort results + * @param results + * @param filter + */ + $.Autocompleter.prototype.sortResults = function(results, filter) { + var self = this; + var sortFunction = this.options.sortFunction; + if (!$.isFunction(sortFunction)) { + sortFunction = function(a, b, f) { + return sortValueAlpha(a, b, self.options.matchCase); + }; + } + results.sort(function(a, b) { + return sortFunction(a, b, filter, self.options); + }); + return results; + }; + + /** + * Convert string before matching + * @param s + * @param a + * @param b + */ + $.Autocompleter.prototype.matchStringConverter = function(s, a, b) { + var converter = this.options.matchStringConverter; + if ($.isFunction(converter)) { + s = converter(s, a, b); + } + return s; + }; + + /** + * Convert string before use + * @param {String} s + */ + $.Autocompleter.prototype.beforeUseConverter = function(s) { + s = this.getValue(s); + var converter = this.options.beforeUseConverter; + if ($.isFunction(converter)) { + s = converter(s); + } + return s; + }; + + /** + * Enable finish on blur event + */ + $.Autocompleter.prototype.enableFinishOnBlur = function() { + this.finishOnBlur_ = true; + }; + + /** + * Disable finish on blur event + */ + $.Autocompleter.prototype.disableFinishOnBlur = function() { + this.finishOnBlur_ = false; + }; + + /** + * Create a results item (LI element) from a result + * @param result + */ + $.Autocompleter.prototype.createItemFromResult = function(result) { + var self = this; + var $li = $('
<li></li>
  • '); + $li.html(this.showResult(result.value, result.data)); + $li.data({value: result.value, data: result.data}) + .click(function() { + self.selectItem($li); + }) + .mousedown(self.disableFinishOnBlur) + .mouseup(self.enableFinishOnBlur) + ; + return $li; + }; + + /** + * Get all items from the results list + * @param result + */ + $.Autocompleter.prototype.getItems = function() { + return $('>ul>li', this.dom.$results); + }; + + /** + * Show all results + * @param results + * @param filter + */ + $.Autocompleter.prototype.showResults = function(results, filter) { + var numResults = results.length; + var self = this; + var $ul = $('
<ul></ul>
      '); + var i, result, $li, autoWidth, first = false, $first = false; + + if (numResults) { + for (i = 0; i < numResults; i++) { + result = results[i]; + $li = this.createItemFromResult(result); + $ul.append($li); + if (first === false) { + first = String(result.value); + $first = $li; + $li.addClass(this.options.firstItemClass); + } + if (i === numResults - 1) { + $li.addClass(this.options.lastItemClass); + } + } + + this.dom.$results.html($ul).show(); + + // Always recalculate position since window size or + // input element location may have changed. + this.position(); + if (this.options.autoWidth) { + autoWidth = this.dom.$elem.outerWidth() - this.dom.$results.outerWidth() + this.dom.$results.width(); + this.dom.$results.css(this.options.autoWidth, autoWidth); + } + this.getItems().hover( + function() { self.focusItem(this); }, + function() { /* void */ } + ); + if (this.autoFill(first, filter) || this.options.selectFirst || (this.options.selectOnly && numResults === 1)) { + this.focusItem($first); + } + this.active_ = true; + } else { + this.hideResults(); + this.active_ = false; + } + }; + + $.Autocompleter.prototype.showResult = function(value, data) { + if ($.isFunction(this.options.showResult)) { + return this.options.showResult(value, data); + } else { + return $('
<p></p>
      ').text(value).html(); + } + }; + + $.Autocompleter.prototype.autoFill = function(value, filter) { + var lcValue, lcFilter, valueLength, filterLength; + if (this.options.autoFill && this.lastKeyPressed_ !== 8) { + lcValue = String(value).toLowerCase(); + lcFilter = String(filter).toLowerCase(); + valueLength = value.length; + filterLength = filter.length; + if (lcValue.substr(0, filterLength) === lcFilter) { + var d = this.getDelimiterOffsets(); + var pad = d.start ? ' ' : ''; // if there is a preceding delimiter + this.setValue( pad + value ); + var start = filterLength + d.start + pad.length; + var end = valueLength + d.start + pad.length; + this.selectRange(start, end); + return true; + } + } + return false; + }; + + $.Autocompleter.prototype.focusNext = function() { + this.focusMove(+1); + }; + + $.Autocompleter.prototype.focusPrev = function() { + this.focusMove(-1); + }; + + $.Autocompleter.prototype.focusMove = function(modifier) { + var $items = this.getItems(); + modifier = sanitizeInteger(modifier, 0); + if (modifier) { + for (var i = 0; i < $items.length; i++) { + if ($($items[i]).hasClass(this.selectClass_)) { + this.focusItem(i + modifier); + return; + } + } + } + this.focusItem(0); + }; + + $.Autocompleter.prototype.focusItem = function(item) { + var $item, $items = this.getItems(); + if ($items.length) { + $items.removeClass(this.selectClass_).removeClass(this.options.selectClass); + if (typeof item === 'number') { + if (item < 0) { + item = 0; + } else if (item >= $items.length) { + item = $items.length - 1; + } + $item = $($items[item]); + } else { + $item = $(item); + } + if ($item) { + $item.addClass(this.selectClass_).addClass(this.options.selectClass); + } + } + }; + + $.Autocompleter.prototype.selectCurrent = function() { + var $item = $('li.' 
+ this.selectClass_, this.dom.$results); + if ($item.length === 1) { + this.selectItem($item); + } else { + this.deactivate(false); + } + }; + + $.Autocompleter.prototype.selectItem = function($li) { + var value = $li.data('value'); + var data = $li.data('data'); + var displayValue = this.displayValue(value, data); + var processedDisplayValue = this.beforeUseConverter(displayValue); + this.lastProcessedValue_ = processedDisplayValue; + this.lastSelectedValue_ = processedDisplayValue; + var d = this.getDelimiterOffsets(); + var delimiter = this.options.delimiterChar; + var elem = this.dom.$elem; + var extraCaretPos = 0; + if ( this.options.useDelimiter ) { + // if there is a preceding delimiter, add a space after the delimiter + if ( elem.val().substring(d.start-1, d.start) == delimiter && delimiter != ' ' ) { + displayValue = ' ' + displayValue; + } + // if there is not already a delimiter trailing this value, add it + if ( elem.val().substring(d.end, d.end+1) != delimiter && this.lastKeyPressed_ != this.options.delimiterKeyCode ) { + displayValue = displayValue + delimiter; + } else { + // move the cursor after the existing trailing delimiter + extraCaretPos = 1; + } + } + this.setValue(displayValue); + this.setCaret(d.start + displayValue.length + extraCaretPos); + this.callHook('onItemSelect', { value: value, data: data }); + this.deactivate(true); + elem.focus(); + }; + + $.Autocompleter.prototype.displayValue = function(value, data) { + if ($.isFunction(this.options.displayValue)) { + return this.options.displayValue(value, data); + } + return value; + }; + + $.Autocompleter.prototype.hideResults = function() { + this.dom.$results.hide(); + }; + + $.Autocompleter.prototype.deactivate = function(finish) { + if (this.finishTimeout_) { + clearTimeout(this.finishTimeout_); + } + if (this.keyTimeout_) { + clearTimeout(this.keyTimeout_); + } + if (finish) { + if (this.lastProcessedValue_ !== this.lastSelectedValue_) { + if (this.options.mustMatch) { + this.setValue(''); + } + this.callHook('onNoMatch'); + } + if (this.active_) { + this.callHook('onFinish'); + } + this.lastKeyPressed_ = null; + this.lastProcessedValue_ = null; + this.lastSelectedValue_ = null; + this.active_ = false; + } + this.hideResults(); + }; + + $.Autocompleter.prototype.selectRange = function(start, end) { + var input = this.dom.$elem.get(0); + if (input.setSelectionRange) { + input.focus(); + input.setSelectionRange(start, end); + } else if (input.createTextRange) { + var range = input.createTextRange(); + range.collapse(true); + range.moveEnd('character', end); + range.moveStart('character', start); + range.select(); + } + }; + + /** + * Move caret to position + * @param {Number} pos + */ + $.Autocompleter.prototype.setCaret = function(pos) { + this.selectRange(pos, pos); + }; + + /** + * Get caret position + */ + $.Autocompleter.prototype.getCaret = function() { + var $elem = this.dom.$elem; + var elem = $elem[0]; + var val, selection, range, start, end, stored_range; + if (elem.createTextRange) { // IE + selection = document.selection; + if (elem.tagName.toLowerCase() != 'textarea') { + val = $elem.val(); + range = selection.createRange().duplicate(); + range.moveEnd('character', val.length); + if (range.text === '') { + start = val.length; + } else { + start = val.lastIndexOf(range.text); + } + range = selection.createRange().duplicate(); + range.moveStart('character', -val.length); + end = range.text.length; + } else { + range = selection.createRange(); + stored_range = range.duplicate(); + 
stored_range.moveToElementText(elem); + stored_range.setEndPoint('EndToEnd', range); + start = stored_range.text.length - range.text.length; + end = start + range.text.length; + } + } else { + start = $elem[0].selectionStart; + end = $elem[0].selectionEnd; + } + return { + start: start, + end: end + }; + }; + + /** + * Set the value that is currently being autocompleted + * @param {String} value + */ + $.Autocompleter.prototype.setValue = function(value) { + if ( this.options.useDelimiter ) { + // set the substring between the current delimiters + var val = this.dom.$elem.val(); + var d = this.getDelimiterOffsets(); + var preVal = val.substring(0, d.start); + var postVal = val.substring(d.end); + value = preVal + value + postVal; + } + this.dom.$elem.val(value); + }; + + /** + * Get the value currently being autocompleted + * @param {String} value + */ + $.Autocompleter.prototype.getValue = function(value) { + if ( this.options.useDelimiter ) { + var d = this.getDelimiterOffsets(); + return value.substring(d.start, d.end).trim(); + } else { + return value; + } + }; + + /** + * Get the offsets of the value currently being autocompleted + */ + $.Autocompleter.prototype.getDelimiterOffsets = function() { + var val = this.dom.$elem.val(); + if ( this.options.useDelimiter ) { + var preCaretVal = val.substring(0, this.getCaret().start); + var start = preCaretVal.lastIndexOf(this.options.delimiterChar) + 1; + var postCaretVal = val.substring(this.getCaret().start); + var end = postCaretVal.indexOf(this.options.delimiterChar); + if ( end == -1 ) end = val.length; + end += this.getCaret().start; + } else { + start = 0; + end = val.length; + } + return { + start: start, + end: end + }; + }; + +})(jQuery); diff --git a/awx/lib/site-packages/django_extensions/static/django_extensions/js/jquery.js b/awx/lib/site-packages/django_extensions/static/django_extensions/js/jquery.js deleted file mode 100644 index 400531a2db..0000000000 --- a/awx/lib/site-packages/django_extensions/static/django_extensions/js/jquery.js +++ /dev/null @@ -1,3558 +0,0 @@ -(function(){ -/* - * jQuery 1.2.6 - New Wave Javascript - * - * Copyright (c) 2008 John Resig (jquery.com) - * Dual licensed under the MIT (MIT-LICENSE.txt) - * and GPL (GPL-LICENSE.txt) licenses. - * - * $Date: 2008-05-27 21:17:26 +0200 (Di, 27 Mai 2008) $ - * $Rev: 5700 $ - */ - -// Map over jQuery in case of overwrite -var _jQuery = window.jQuery, -// Map over the $ in case of overwrite - _$ = window.$; - -var jQuery = window.jQuery = window.$ = function( selector, context ) { - // The jQuery object is actually just the init constructor 'enhanced' - return new jQuery.fn.init( selector, context ); -}; - -// A simple way to check for HTML strings or ID strings -// (both of which we optimize for) -var quickExpr = /^[^<]*(<(.|\s)+>)[^>]*$|^#(\w+)$/, - -// Is it a simple selector - isSimple = /^.[^:#\[\.]*$/, - -// Will speed up references to undefined, and allows munging its name. - undefined; - -jQuery.fn = jQuery.prototype = { - init: function( selector, context ) { - // Make sure that a selection was provided - selector = selector || document; - - // Handle $(DOMElement) - if ( selector.nodeType ) { - this[0] = selector; - this.length = 1; - return this; - } - // Handle HTML strings - if ( typeof selector == "string" ) { - // Are we dealing with HTML string or an ID? 
- var match = quickExpr.exec( selector ); - - // Verify a match, and that no context was specified for #id - if ( match && (match[1] || !context) ) { - - // HANDLE: $(html) -> $(array) - if ( match[1] ) - selector = jQuery.clean( [ match[1] ], context ); - - // HANDLE: $("#id") - else { - var elem = document.getElementById( match[3] ); - - // Make sure an element was located - if ( elem ){ - // Handle the case where IE and Opera return items - // by name instead of ID - if ( elem.id != match[3] ) - return jQuery().find( selector ); - - // Otherwise, we inject the element directly into the jQuery object - return jQuery( elem ); - } - selector = []; - } - - // HANDLE: $(expr, [context]) - // (which is just equivalent to: $(content).find(expr) - } else - return jQuery( context ).find( selector ); - - // HANDLE: $(function) - // Shortcut for document ready - } else if ( jQuery.isFunction( selector ) ) - return jQuery( document )[ jQuery.fn.ready ? "ready" : "load" ]( selector ); - - return this.setArray(jQuery.makeArray(selector)); - }, - - // The current version of jQuery being used - jquery: "1.2.6", - - // The number of elements contained in the matched element set - size: function() { - return this.length; - }, - - // The number of elements contained in the matched element set - length: 0, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - return num == undefined ? - - // Return a 'clean' array - jQuery.makeArray( this ) : - - // Return just the object - this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - // Build a new jQuery matched element set - var ret = jQuery( elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Force the current matched set of elements to become - // the specified array of elements (destroying the stack in the process) - // You should use pushStack() in order to do this, but maintain the stack - setArray: function( elems ) { - // Resetting the length to 0, then using the native Array push - // is a super-fast way to populate an object with array-like properties - this.length = 0; - Array.prototype.push.apply( this, elems ); - - return this; - }, - - // Execute a callback for every element in the matched set. - // (You can seed the arguments with an array of args, but this is - // only used internally.) - each: function( callback, args ) { - return jQuery.each( this, callback, args ); - }, - - // Determine the position of an element within - // the matched set of elements - index: function( elem ) { - var ret = -1; - - // Locate the position of the desired element - return jQuery.inArray( - // If it receives a jQuery object, the first element is used - elem && elem.jquery ? elem[0] : elem - , this ); - }, - - attr: function( name, value, type ) { - var options = name; - - // Look for the case where we're accessing a style value - if ( name.constructor == String ) - if ( value === undefined ) - return this[0] && jQuery[ type || "attr" ]( this[0], name ); - - else { - options = {}; - options[ name ] = value; - } - - // Check to see if we're setting style values - return this.each(function(i){ - // Set all the styles - for ( name in options ) - jQuery.attr( - type ? 
-						this.style :
-						this,
-					name, jQuery.prop( this, options[ name ], type, i, name )
-				);
-		});
-	},
-
-	css: function( key, value ) {
-		// ignore negative width and height values
-		if ( (key == 'width' || key == 'height') && parseFloat(value) < 0 )
-			value = undefined;
-		return this.attr( key, value, "curCSS" );
-	},
-
-	text: function( text ) {
-		if ( typeof text != "object" && text != null )
-			return this.empty().append( (this[0] && this[0].ownerDocument || document).createTextNode( text ) );
-
-		var ret = "";
-
-		jQuery.each( text || this, function(){
-			jQuery.each( this.childNodes, function(){
-				if ( this.nodeType != 8 )
-					ret += this.nodeType != 1 ?
-						this.nodeValue :
-						jQuery.fn.text( [ this ] );
-			});
-		});
-
-		return ret;
-	},
-
-	wrapAll: function( html ) {
-		if ( this[0] )
-			// The elements to wrap the target around
-			jQuery( html, this[0].ownerDocument )
-				.clone()
-				.insertBefore( this[0] )
-				.map(function(){
-					var elem = this;
-
-					while ( elem.firstChild )
-						elem = elem.firstChild;
-
-					return elem;
-				})
-				.append(this);
-
-		return this;
-	},
-
-	wrapInner: function( html ) {
-		return this.each(function(){
-			jQuery( this ).contents().wrapAll( html );
-		});
-	},
-
-	wrap: function( html ) {
-		return this.each(function(){
-			jQuery( this ).wrapAll( html );
-		});
-	},
-
-	append: function() {
-		return this.domManip(arguments, true, false, function(elem){
-			if (this.nodeType == 1)
-				this.appendChild( elem );
-		});
-	},
-
-	prepend: function() {
-		return this.domManip(arguments, true, true, function(elem){
-			if (this.nodeType == 1)
-				this.insertBefore( elem, this.firstChild );
-		});
-	},
-
-	before: function() {
-		return this.domManip(arguments, false, false, function(elem){
-			this.parentNode.insertBefore( elem, this );
-		});
-	},
-
-	after: function() {
-		return this.domManip(arguments, false, true, function(elem){
-			this.parentNode.insertBefore( elem, this.nextSibling );
-		});
-	},
-
-	end: function() {
-		return this.prevObject || jQuery( [] );
-	},
-
-	find: function( selector ) {
-		var elems = jQuery.map(this, function(elem){
-			return jQuery.find( selector, elem );
-		});
-
-		return this.pushStack( /[^+>] [^+>]/.test( selector ) || selector.indexOf("..") > -1 ?
-			jQuery.unique( elems ) :
-			elems );
-	},
-
-	clone: function( events ) {
-		// Do the clone
-		var ret = this.map(function(){
-			if ( jQuery.browser.msie && !jQuery.isXMLDoc(this) ) {
-				// IE copies events bound via attachEvent when
-				// using cloneNode. Calling detachEvent on the
-				// clone will also remove the events from the orignal
-				// In order to get around this, we use innerHTML.
-				// Unfortunately, this means some modifications to
-				// attributes in IE that are actually only stored
-				// as properties will not be copied (such as the
-				// the name attribute on an input).
-				var clone = this.cloneNode(true),
-					container = document.createElement("div");
-				container.appendChild(clone);
-				return jQuery.clean([container.innerHTML])[0];
-			} else
-				return this.cloneNode(true);
-		});
-
-		// Need to set the expando to null on the cloned set if it exists
-		// removeData doesn't work here, IE removes it from the original as well
-		// this is primarily for IE but the data expando shouldn't be copied over in any browser
-		var clone = ret.find("*").andSelf().each(function(){
-			if ( this[ expando ] != undefined )
-				this[ expando ] = null;
-		});
-
-		// Copy the events from the original to the clone
-		if ( events === true )
-			this.find("*").andSelf().each(function(i){
-				if (this.nodeType == 3)
-					return;
-				var events = jQuery.data( this, "events" );
-
-				for ( var type in events )
-					for ( var handler in events[ type ] )
-						jQuery.event.add( clone[ i ], type, events[ type ][ handler ], events[ type ][ handler ].data );
-			});
-
-		// Return the cloned set
-		return ret;
-	},
-
-	filter: function( selector ) {
-		return this.pushStack(
-			jQuery.isFunction( selector ) &&
-			jQuery.grep(this, function(elem, i){
-				return selector.call( elem, i );
-			}) ||
-
-			jQuery.multiFilter( selector, this ) );
-	},
-
-	not: function( selector ) {
-		if ( selector.constructor == String )
-			// test special case where just one selector is passed in
-			if ( isSimple.test( selector ) )
-				return this.pushStack( jQuery.multiFilter( selector, this, true ) );
-			else
-				selector = jQuery.multiFilter( selector, this );
-
-		var isArrayLike = selector.length && selector[selector.length - 1] !== undefined && !selector.nodeType;
-		return this.filter(function() {
-			return isArrayLike ? jQuery.inArray( this, selector ) < 0 : this != selector;
-		});
-	},
-
-	add: function( selector ) {
-		return this.pushStack( jQuery.unique( jQuery.merge(
-			this.get(),
-			typeof selector == 'string' ?
-				jQuery( selector ) :
-				jQuery.makeArray( selector )
-		)));
-	},
-
-	is: function( selector ) {
-		return !!selector && jQuery.multiFilter( selector, this ).length > 0;
-	},
-
-	hasClass: function( selector ) {
-		return this.is( "." + selector );
-	},
-
-	val: function( value ) {
-		if ( value == undefined ) {
-
-			if ( this.length ) {
-				var elem = this[0];
-
-				// We need to handle select boxes special
-				if ( jQuery.nodeName( elem, "select" ) ) {
-					var index = elem.selectedIndex,
-						values = [],
-						options = elem.options,
-						one = elem.type == "select-one";
-
-					// Nothing was selected
-					if ( index < 0 )
-						return null;
-
-					// Loop through all the selected options
-					for ( var i = one ? index : 0, max = one ? index + 1 : options.length; i < max; i++ ) {
-						var option = options[ i ];
-
-						if ( option.selected ) {
-							// Get the specifc value for the option
-							value = jQuery.browser.msie && !option.attributes.value.specified ?
-								option.text :
-								option.value;
-
-							// We don't need an array for one selects
-							if ( one )
-								return value;
-
-							// Multi-Selects return an array
-							values.push( value );
-						}
-					}
-
-					return values;
-
-				// Everything else, we just grab the value
-				} else
-					return (this[0].value || "").replace(/\r/g, "");
-
-			}
-
-			return undefined;
-		}
-
-		if( value.constructor == Number )
-			value += '';
-
-		return this.each(function(){
-			if ( this.nodeType != 1 )
-				return;
-
-			if ( value.constructor == Array && /radio|checkbox/.test( this.type ) )
-				this.checked = (jQuery.inArray(this.value, value) >= 0 ||
-					jQuery.inArray(this.name, value) >= 0);
-
-			else if ( jQuery.nodeName( this, "select" ) ) {
-				var values = jQuery.makeArray(value);
-
-				jQuery( "option", this ).each(function(){
-					this.selected = (jQuery.inArray( this.value, values ) >= 0 ||
-						jQuery.inArray( this.text, values ) >= 0);
-				});
-
-				if ( !values.length )
-					this.selectedIndex = -1;
-
-			} else
-				this.value = value;
-		});
-	},
-
-	html: function( value ) {
-		return value == undefined ?
-			(this[0] ?
-				this[0].innerHTML :
-				null) :
-			this.empty().append( value );
-	},
-
-	replaceWith: function( value ) {
-		return this.after( value ).remove();
-	},
-
-	eq: function( i ) {
-		return this.slice( i, i + 1 );
-	},
-
-	slice: function() {
-		return this.pushStack( Array.prototype.slice.apply( this, arguments ) );
-	},
-
-	map: function( callback ) {
-		return this.pushStack( jQuery.map(this, function(elem, i){
-			return callback.call( elem, i, elem );
-		}));
-	},
-
-	andSelf: function() {
-		return this.add( this.prevObject );
-	},
-
-	data: function( key, value ){
-		var parts = key.split(".");
-		parts[1] = parts[1] ? "." + parts[1] : "";
-
-		if ( value === undefined ) {
-			var data = this.triggerHandler("getData" + parts[1] + "!", [parts[0]]);
-
-			if ( data === undefined && this.length )
-				data = jQuery.data( this[0], key );
-
-			return data === undefined && parts[1] ?
-				this.data( parts[0] ) :
-				data;
-		} else
-			return this.trigger("setData" + parts[1] + "!", [parts[0], value]).each(function(){
-				jQuery.data( this, key, value );
-			});
-	},
-
-	removeData: function( key ){
-		return this.each(function(){
-			jQuery.removeData( this, key );
-		});
-	},
-
-	domManip: function( args, table, reverse, callback ) {
-		var clone = this.length > 1, elems;
-
-		return this.each(function(){
-			if ( !elems ) {
-				elems = jQuery.clean( args, this.ownerDocument );
-
-				if ( reverse )
-					elems.reverse();
-			}
-
-			var obj = this;
-
-			if ( table && jQuery.nodeName( this, "table" ) && jQuery.nodeName( elems[0], "tr" ) )
-				obj = this.getElementsByTagName("tbody")[0] || this.appendChild( this.ownerDocument.createElement("tbody") );
-
-			var scripts = jQuery( [] );
-
-			jQuery.each(elems, function(){
-				var elem = clone ?
-					jQuery( this ).clone( true )[0] :
-					this;
-
-				// execute all scripts after the elements have been injected
-				if ( jQuery.nodeName( elem, "script" ) )
-					scripts = scripts.add( elem );
-				else {
-					// Remove any inner scripts for later evaluation
-					if ( elem.nodeType == 1 )
-						scripts = scripts.add( jQuery( "script", elem ).remove() );
-
-					// Inject the elements into the document
-					callback.call( obj, elem );
-				}
-			});
-
-			scripts.each( evalScript );
-		});
-	}
-};
-
-// Give the init function the jQuery prototype for later instantiation
-jQuery.fn.init.prototype = jQuery.fn;
-
-function evalScript( i, elem ) {
-	if ( elem.src )
-		jQuery.ajax({
-			url: elem.src,
-			async: false,
-			dataType: "script"
-		});
-
-	else
-		jQuery.globalEval( elem.text || elem.textContent || elem.innerHTML || "" );
-
-	if ( elem.parentNode )
-		elem.parentNode.removeChild( elem );
-}
-
-function now(){
-	return +new Date;
-}
-
-jQuery.extend = jQuery.fn.extend = function() {
-	// copy reference to target object
-	var target = arguments[0] || {}, i = 1, length = arguments.length, deep = false, options;
-
-	// Handle a deep copy situation
-	if ( target.constructor == Boolean ) {
-		deep = target;
-		target = arguments[1] || {};
-		// skip the boolean and the target
-		i = 2;
-	}
-
-	// Handle case when target is a string or something (possible in deep copy)
-	if ( typeof target != "object" && typeof target != "function" )
-		target = {};
-
-	// extend jQuery itself if only one argument is passed
-	if ( length == i ) {
-		target = this;
-		--i;
-	}
-
-	for ( ; i < length; i++ )
-		// Only deal with non-null/undefined values
-		if ( (options = arguments[ i ]) != null )
-			// Extend the base object
-			for ( var name in options ) {
-				var src = target[ name ], copy = options[ name ];
-
-				// Prevent never-ending loop
-				if ( target === copy )
-					continue;
-
-				// Recurse if we're merging object values
-				if ( deep && copy && typeof copy == "object" && !copy.nodeType )
-					target[ name ] = jQuery.extend( deep,
-						// Never move original objects, clone them
-						src || ( copy.length != null ? [ ] : { } )
-					, copy );
-
-				// Don't bring in undefined values
-				else if ( copy !== undefined )
-					target[ name ] = copy;
-
-			}
-
-	// Return the modified object
-	return target;
-};
-
-var expando = "jQuery" + now(), uuid = 0, windowData = {},
-	// exclude the following css properties to add px
-	exclude = /z-?index|font-?weight|opacity|zoom|line-?height/i,
-	// cache defaultView
-	defaultView = document.defaultView || {};
-
-jQuery.extend({
-	noConflict: function( deep ) {
-		window.$ = _$;
-
-		if ( deep )
-			window.jQuery = _jQuery;
-
-		return jQuery;
-	},
-
-	// See test/unit/core.js for details concerning this function.
-	isFunction: function( fn ) {
-		return !!fn && typeof fn != "string" && !fn.nodeName &&
-			fn.constructor != Array && /^[\s[]?function/.test( fn + "" );
-	},
-
-	// check if an element is in a (or is an) XML document
-	isXMLDoc: function( elem ) {
-		return elem.documentElement && !elem.body ||
-			elem.tagName && elem.ownerDocument && !elem.ownerDocument.body;
-	},
-
-	// Evalulates a script in a global context
-	globalEval: function( data ) {
-		data = jQuery.trim( data );
-
-		if ( data ) {
-			// Inspired by code by Andrea Giammarchi
-			// http://webreflection.blogspot.com/2007/08/global-scope-evaluation-and-dom.html
-			var head = document.getElementsByTagName("head")[0] || document.documentElement,
-				script = document.createElement("script");
-
-			script.type = "text/javascript";
-			if ( jQuery.browser.msie )
-				script.text = data;
-			else
-				script.appendChild( document.createTextNode( data ) );
-
-			// Use insertBefore instead of appendChild to circumvent an IE6 bug.
-			// This arises when a base node is used (#2709).
-			head.insertBefore( script, head.firstChild );
-			head.removeChild( script );
-		}
-	},
-
-	nodeName: function( elem, name ) {
-		return elem.nodeName && elem.nodeName.toUpperCase() == name.toUpperCase();
-	},
-
-	cache: {},
-
-	data: function( elem, name, data ) {
-		elem = elem == window ?
-			windowData :
-			elem;
-
-		var id = elem[ expando ];
-
-		// Compute a unique ID for the element
-		if ( !id )
-			id = elem[ expando ] = ++uuid;
-
-		// Only generate the data cache if we're
-		// trying to access or manipulate it
-		if ( name && !jQuery.cache[ id ] )
-			jQuery.cache[ id ] = {};
-
-		// Prevent overriding the named cache with undefined values
-		if ( data !== undefined )
-			jQuery.cache[ id ][ name ] = data;
-
-		// Return the named cache data, or the ID for the element
-		return name ?
-			jQuery.cache[ id ][ name ] :
-			id;
-	},
-
-	removeData: function( elem, name ) {
-		elem = elem == window ?
-			windowData :
-			elem;
-
-		var id = elem[ expando ];
-
-		// If we want to remove a specific section of the element's data
-		if ( name ) {
-			if ( jQuery.cache[ id ] ) {
-				// Remove the section of cache data
-				delete jQuery.cache[ id ][ name ];
-
-				// If we've removed all the data, remove the element's cache
-				name = "";
-
-				for ( name in jQuery.cache[ id ] )
-					break;
-
-				if ( !name )
-					jQuery.removeData( elem );
-			}
-
-		// Otherwise, we want to remove all of the element's data
-		} else {
-			// Clean up the element expando
-			try {
-				delete elem[ expando ];
-			} catch(e){
-				// IE has trouble directly removing the expando
-				// but it's ok with using removeAttribute
-				if ( elem.removeAttribute )
-					elem.removeAttribute( expando );
-			}
-
-			// Completely remove the data cache
-			delete jQuery.cache[ id ];
-		}
-	},
-
-	// args is for internal usage only
-	each: function( object, callback, args ) {
-		var name, i = 0, length = object.length;
-
-		if ( args ) {
-			if ( length == undefined ) {
-				for ( name in object )
-					if ( callback.apply( object[ name ], args ) === false )
-						break;
-			} else
-				for ( ; i < length; )
-					if ( callback.apply( object[ i++ ], args ) === false )
-						break;
-
-		// A special, fast, case for the most common use of each
-		} else {
-			if ( length == undefined ) {
-				for ( name in object )
-					if ( callback.call( object[ name ], name, object[ name ] ) === false )
-						break;
-			} else
-				for ( var value = object[0];
-					i < length && callback.call( value, i, value ) !== false; value = object[++i] ){}
-		}
-
-		return object;
-	},
-
-	prop: function( elem, value, type, i, name ) {
-		// Handle executable functions
-		if ( jQuery.isFunction( value ) )
-			value = value.call( elem, i );
-
-		// Handle passing in a number to a CSS property
-		return value && value.constructor == Number && type == "curCSS" && !exclude.test( name ) ?
-			value + "px" :
-			value;
-	},
-
-	className: {
-		// internal only, use addClass("class")
-		add: function( elem, classNames ) {
-			jQuery.each((classNames || "").split(/\s+/), function(i, className){
-				if ( elem.nodeType == 1 && !jQuery.className.has( elem.className, className ) )
-					elem.className += (elem.className ? " " : "") + className;
-			});
-		},
-
-		// internal only, use removeClass("class")
-		remove: function( elem, classNames ) {
-			if (elem.nodeType == 1)
-				elem.className = classNames != undefined ?
-					jQuery.grep(elem.className.split(/\s+/), function(className){
-						return !jQuery.className.has( classNames, className );
-					}).join(" ") :
-					"";
-		},
-
-		// internal only, use hasClass("class")
-		has: function( elem, className ) {
-			return jQuery.inArray( className, (elem.className || elem).toString().split(/\s+/) ) > -1;
-		}
-	},
-
-	// A method for quickly swapping in/out CSS properties to get correct calculations
-	swap: function( elem, options, callback ) {
-		var old = {};
-		// Remember the old values, and insert the new ones
-		for ( var name in options ) {
-			old[ name ] = elem.style[ name ];
-			elem.style[ name ] = options[ name ];
-		}
-
-		callback.call( elem );
-
-		// Revert the old values
-		for ( var name in options )
-			elem.style[ name ] = old[ name ];
-	},
-
-	css: function( elem, name, force ) {
-		if ( name == "width" || name == "height" ) {
-			var val, props = { position: "absolute", visibility: "hidden", display:"block" }, which = name == "width" ? [ "Left", "Right" ] : [ "Top", "Bottom" ];
-
-			function getWH() {
-				val = name == "width" ?
-					elem.offsetWidth :
-					elem.offsetHeight;
-				var padding = 0, border = 0;
-				jQuery.each( which, function() {
-					padding += parseFloat(jQuery.curCSS( elem, "padding" + this, true)) || 0;
-					border += parseFloat(jQuery.curCSS( elem, "border" + this + "Width", true)) || 0;
-				});
-				val -= Math.round(padding + border);
-			}
-
-			if ( jQuery(elem).is(":visible") )
-				getWH();
-			else
-				jQuery.swap( elem, props, getWH );
-
-			return Math.max(0, val);
-		}
-
-		return jQuery.curCSS( elem, name, force );
-	},
-
-	curCSS: function( elem, name, force ) {
-		var ret, style = elem.style;
-
-		// A helper method for determining if an element's values are broken
-		function color( elem ) {
-			if ( !jQuery.browser.safari )
-				return false;
-
-			// defaultView is cached
-			var ret = defaultView.getComputedStyle( elem, null );
-			return !ret || ret.getPropertyValue("color") == "";
-		}
-
-		// We need to handle opacity special in IE
-		if ( name == "opacity" && jQuery.browser.msie ) {
-			ret = jQuery.attr( style, "opacity" );
-
-			return ret == "" ?
-				"1" :
-				ret;
-		}
-		// Opera sometimes will give the wrong display answer, this fixes it, see #2037
-		if ( jQuery.browser.opera && name == "display" ) {
-			var save = style.outline;
-			style.outline = "0 solid black";
-			style.outline = save;
-		}
-
-		// Make sure we're using the right name for getting the float value
-		if ( name.match( /float/i ) )
-			name = styleFloat;
-
-		if ( !force && style && style[ name ] )
-			ret = style[ name ];
-
-		else if ( defaultView.getComputedStyle ) {
-
-			// Only "float" is needed here
-			if ( name.match( /float/i ) )
-				name = "float";
-
-			name = name.replace( /([A-Z])/g, "-$1" ).toLowerCase();
-
-			var computedStyle = defaultView.getComputedStyle( elem, null );
-
-			if ( computedStyle && !color( elem ) )
-				ret = computedStyle.getPropertyValue( name );
-
-			// If the element isn't reporting its values properly in Safari
-			// then some display: none elements are involved
-			else {
-				var swap = [], stack = [], a = elem, i = 0;
-
-				// Locate all of the parent display: none elements
-				for ( ; a && color(a); a = a.parentNode )
-					stack.unshift(a);
-
-				// Go through and make them visible, but in reverse
-				// (It would be better if we knew the exact display type that they had)
-				for ( ; i < stack.length; i++ )
-					if ( color( stack[ i ] ) ) {
-						swap[ i ] = stack[ i ].style.display;
-						stack[ i ].style.display = "block";
-					}
-
-				// Since we flip the display style, we have to handle that
-				// one special, otherwise get the value
-				ret = name == "display" && swap[ stack.length - 1 ] != null ?
- "none" : - ( computedStyle && computedStyle.getPropertyValue( name ) ) || ""; - - // Finally, revert the display styles back - for ( i = 0; i < swap.length; i++ ) - if ( swap[ i ] != null ) - stack[ i ].style.display = swap[ i ]; - } - - // We should always get a number back from opacity - if ( name == "opacity" && ret == "" ) - ret = "1"; - - } else if ( elem.currentStyle ) { - var camelCase = name.replace(/\-(\w)/g, function(all, letter){ - return letter.toUpperCase(); - }); - - ret = elem.currentStyle[ name ] || elem.currentStyle[ camelCase ]; - - // From the awesome hack by Dean Edwards - // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291 - - // If we're not dealing with a regular pixel number - // but a number that has a weird ending, we need to convert it to pixels - if ( !/^\d+(px)?$/i.test( ret ) && /^\d/.test( ret ) ) { - // Remember the original values - var left = style.left, rsLeft = elem.runtimeStyle.left; - - // Put in the new values to get a computed value out - elem.runtimeStyle.left = elem.currentStyle.left; - style.left = ret || 0; - ret = style.pixelLeft + "px"; - - // Revert the changed values - style.left = left; - elem.runtimeStyle.left = rsLeft; - } - } - - return ret; - }, - - clean: function( elems, context ) { - var ret = []; - context = context || document; - // !context.createElement fails in IE with an error but returns typeof 'object' - if (typeof context.createElement == 'undefined') - context = context.ownerDocument || context[0] && context[0].ownerDocument || document; - - jQuery.each(elems, function(i, elem){ - if ( !elem ) - return; - - if ( elem.constructor == Number ) - elem += ''; - - // Convert html string into DOM nodes - if ( typeof elem == "string" ) { - // Fix "XHTML"-style tags in all browsers - elem = elem.replace(/(<(\w+)[^>]*?)\/>/g, function(all, front, tag){ - return tag.match(/^(abbr|br|col|img|input|link|meta|param|hr|area|embed)$/i) ? - all : - front + ">"; - }); - - // Trim whitespace, otherwise indexOf won't work as expected - var tags = jQuery.trim( elem ).toLowerCase(), div = context.createElement("div"); - - var wrap = - // option or optgroup - !tags.indexOf("", "" ] || - - !tags.indexOf("", "" ] || - - tags.match(/^<(thead|tbody|tfoot|colg|cap)/) && - [ 1, "", "
      " ] || - - !tags.indexOf("", "" ] || - - // matched above - (!tags.indexOf("", "" ] || - - !tags.indexOf("", "" ] || - - // IE can't serialize and